import pandas as pd
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn import metrics
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV, cross_val_score,\
KFold, StratifiedShuffleSplit, ShuffleSplit, learning_curve
# preprocessing
from sklearn import preprocessing
# Visualisation libraries
## Text
from colorama import Fore, Back, Style
from IPython.display import Image, display, Markdown, Latex, clear_output
## plotly
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objs as go
import plotly.offline as py
from plotly.subplots import make_subplots
import plotly.express as px
## seaborn
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("paper", rc={"font.size":12,"axes.titlesize":14,"axes.labelsize":12})
## matplotlib
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from matplotlib.patches import Ellipse, Polygon
import matplotlib.gridspec as gridspec
import matplotlib.colors
from pylab import rcParams
from matplotlib.font_manager import FontProperties
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
plt.style.use('seaborn-whitegrid')
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (17, 6)
mpl.rcParams['axes.labelsize'] = 14
mpl.rcParams['xtick.labelsize'] = 12
mpl.rcParams['ytick.labelsize'] = 12
mpl.rcParams['text.color'] = 'k'
%matplotlib inline
import warnings
warnings.filterwarnings("ignore")
Anomaly detection is a classification process in which rare items, events, or observations in data sets are identified. Learn more about this here. In this article, we investigate the Credit Card Fraud Detection dataset from Kaggle.com.
Credit card companies must be able to recognize fraudulent credit card transactions so that customers are not charged for items that they did not purchase.
The datasets contain transactions made by credit cards in September 2013 by European cardholders. This dataset presents transactions that occurred in two days, where we have 492 frauds out of 284,807 transactions. The dataset is highly unbalanced; the positive class (frauds) accounts for 0.172% of all transactions. It contains only numerical input variables which are the result of a PCA transformation. Unfortunately, due to confidentiality issues, we cannot provide the original features and more background information about the data. Features V1, V2, … V28 are the principal components obtained with PCA; the only features which have not been transformed with PCA are 'Time' and 'Amount'. Feature 'Time' contains the seconds elapsed between each transaction and the first transaction in the dataset. The feature 'Amount' is the transaction Amount; this feature can be used for example-dependent cost-sensitive learning. Feature 'Class' is the response variable and it takes value 1 in case of fraud and 0 otherwise.
# Load the credit-card transactions and normalize the PCA feature names
# (V1..V9 -> V01..V09) so that columns sort lexicographically.
Path = 'Data/creditcard.csv'
Data = pd.read_csv(Path, sep=',')
Labels = ['Normal', 'Fraud']
Target = 'Class'
Col = []
for s in Data.columns:
    # Only the PCA components (V1..V28) contain digits; 'Time', 'Amount'
    # and 'Class' pass through unchanged.
    if any(map(str.isdigit, s)):
        # e.g. 'V7' -> 'V07': zero-pad the numeric suffix to two digits.
        Col.append('V' + s.split('V')[-1].zfill(2))
    else:
        Col.append(s)
Data.columns = Col
del Col
# Quick shape summary: number of attributes and instances.
display(pd.DataFrame(Data.shape, columns = ['Count'], index = ['Attributes', 'Instances']).T)
def Data_info(Inp, Only_NaN = False):
    """Profile a DataFrame: dtype, NaN count, size and non-NaN percentage per feature.

    Parameters
    ----------
    Inp : pd.DataFrame
        Data to profile.
    Only_NaN : bool, optional
        If True, keep only the features that contain at least one NaN.

    Returns
    -------
    pd.DataFrame
        One row per feature (index name 'Features') with columns
        'Data Type', 'Number of NaN Values', 'Size' and 'Percentage'.
    """
    summary = Inp.dtypes.to_frame(name='Data Type').sort_values(by=['Data Type'])
    nan_counts = Inp.isnull().sum().to_frame(name='Number of NaN Values')
    summary = summary.join(nan_counts, how='outer')
    n_rows = Inp.shape[0]
    summary['Size'] = n_rows
    # Percentage of values that are present (not NaN), rounded to 2 decimals.
    summary['Percentage'] = 100 - np.round(100 * (summary['Number of NaN Values'] / n_rows), 2)
    summary.index.name = 'Features'
    summary['Data Type'] = summary['Data Type'].astype(str)
    if Only_NaN:
        return summary.loc[summary['Number of NaN Values'] > 0]
    return summary
# Bar chart of the per-feature completeness summary, colored by dtype
# (plotly express).
data_info = Data_info(Data).reset_index(drop = False)
fig = px.bar(data_info, x= 'Features', y= 'Percentage', color = 'Data Type', text = 'Data Type',
             color_discrete_sequence = ['PaleGreen', 'LightBlue', 'PeachPuff'], hover_data = data_info.columns)
fig.update_layout(plot_bgcolor= 'white', legend=dict(x=1, y=.5, traceorder="normal",
                  bordercolor="DarkGray", borderwidth=1), height = 400, width = 980)
# Pad the in-bar labels so they sit away from the bar edge.
fig.update_traces(texttemplate= 6*' ' + '%{label}', textposition='inside')
fig.update_traces(marker_line_color= 'Black', marker_line_width=1., opacity=1)
fig.show()
| Attributes | Instances | |
|---|---|---|
| Count | 284807 | 31 |
# Histogram of the transaction amount per class, log-scale frequency.
fig, ax = plt.subplots(1, 1, figsize=(16, 6))
_ = ax.hist(Data.loc[Data.Class == 0, 'Amount'], 100, color = '#34495e', hatch = '/', lw = 1.5,
            edgecolor = '#3498db', label = Labels[0])
# BUG FIX: matplotlib patch properties are case-sensitive; 'Color' is an
# unknown property (raises AttributeError) — must be lowercase 'color'.
_ = ax.hist(Data.loc[Data.Class == 1, 'Amount'], 10, color = '#e74c3c', hatch = '\\', lw = 1.5,
            edgecolor = 'DarkRed', label = Labels[1])
_ = ax.set_xlabel('Amount')
_ = ax.set_ylabel('Frequency (Logarithm Scale)')
_ = ax.set_xlim([0, 2e4])
_ = ax.set_yscale('log')
_ = ax.set_ylim([0, 1e6])
_ = ax.legend(bbox_to_anchor=(1, 1), fontsize=14, ncol=2)
# Scatter of transaction amount against elapsed time, frauds drawn on top
# of normal transactions.
fig, ax = plt.subplots(1, 1, figsize=(16, 6))
_ = ax.scatter(Data.loc[Data.Class == 0, 'Time'], Data.loc[Data.Class == 0, 'Amount'], s= 30,
               facecolors='SkyBlue', edgecolors='MidnightBlue', alpha = 0.8, label = Labels[0])
_ = ax.scatter(Data.loc[Data.Class == 1, 'Time'], Data.loc[Data.Class == 1, 'Amount'], s= 30,
               facecolors='Orange', edgecolors='DarkRed', alpha = 1, label = Labels[1])
_ = ax.set_xlabel('Time (in seconds)')
_ = ax.set_ylabel('Amount')
# Small margins around the data range.
_ = ax.set_xlim([-500, Data.Time.max()+500])
_ = ax.set_ylim([-250, 2e4])
_ = ax.legend(bbox_to_anchor=(1, 1), fontsize=14, ncol=2)
Let's look at the transaction class distribution.
def Dist_Table(Inp = Data, Target = Target):
    """Build a class-distribution table with one row per class.

    Parameters
    ----------
    Inp : pd.DataFrame, defaults to the module-level Data.
    Target : str, name of the class column.

    Returns
    -------
    pd.DataFrame with columns [Target, 'Count', 'Percentage'], classes
    mapped to the human-readable module-level Labels.
    """
    Table = Inp[Target].value_counts().to_frame('Count').reset_index(drop = False).rename(columns = {'index':Target})
    # Map the numeric classes (0/1) to their display labels.
    Table[Target] = Table[Target].replace(dict(zip([0,1],Labels)))
    # BUG FIX: each row's percentage is its own share of the total.
    # The original computed 100 - share, which assigned each class the
    # *other* class's percentage (e.g. Normal showed 0.17%).
    Table['Percentage'] = np.round(100*(Table['Count']/Table['Count'].sum()),2)
    return Table
# Class-distribution table for the full dataset.
Table = Dist_Table()
def Dist_Plot(Table, PieColors = ['SeaGreen', 'FireBrick'], TableColors = ['Navy','White']):
    """Show a class-distribution table and a donut chart side by side.

    Parameters
    ----------
    Table : pd.DataFrame
        Output of Dist_Table (columns: Target label, 'Count', 'Percentage').
    PieColors : list
        Slice colors for the donut chart.
    TableColors : list
        [header fill, cell fill] colors for the table.

    NOTE(review): mutable default arguments kept for interface
    compatibility; they are never mutated here.
    """
    fig = make_subplots(rows=1, cols=2, horizontal_spacing = 0.02, column_widths=[0.6, 0.4],
                        specs=[[{"type": "table"},{"type": "pie"}]])
    # Right: donut chart of class counts, fraud slice pulled out.
    fig.add_trace(go.Pie(labels=Table[Target].values, values=Table['Count'].values, pull=[0, 0.1], textfont=dict(size=16),
                         marker=dict(colors = PieColors, line=dict(color='black', width=1))), row=1, col=2)
    fig.update_traces(hole=.5)
    fig.update_layout(height = 400, legend=dict(orientation="v"), legend_title_text= Target,
                      annotations=[dict(text= '<b>' + Target + '</b>', x=0.835, y=0.5, font_size=14, showarrow=False)])
    # Left: table of counts and percentages (percentage shown with 2 decimals).
    T = Table.copy()
    T['Percentage'] = T['Percentage'].map(lambda x: '%.2f' % x)
    Temp = []
    for i in T.columns:
        Temp.append(T.loc[:,i].values)
    fig.add_trace(go.Table(header=dict(values = list(Table.columns), line_color='darkslategray',
                                       fill_color= TableColors[0], align=['center','center'],
                                       font=dict(color='white', size=12), height=25), columnwidth = [0.2, 0.2, 0.2],
                           cells=dict(values=Temp, line_color='darkslategray',
                                      fill=dict(color= [TableColors[1], TableColors[1]]),
                                      align=['center', 'center'], font_size=12, height=20)), 1, 1)
    # FIX: add the missing space before 'Distribution' and close the <b>
    # tag — the title previously rendered as e.g. 'ClassDistribution'.
    fig.update_layout(title={'text': '<b>' + Target + ' Distribution' + '</b>', 'x':0.5,
                             'y':0.90, 'xanchor': 'center', 'yanchor': 'top'})
    fig.show()
# Visualize the full-dataset class distribution.
Dist_Plot(Table)
The dataset is quite large, so we use pandas' DataFrame `sample` feature, taking one-tenth of the data as a sample.
# Work on a reproducible 10% random sample to keep model selection tractable,
# and show its class distribution (should mirror the full dataset's).
df= Data.sample(frac = 0.1, random_state=1).reset_index(drop = True)
Dist_Plot(Dist_Table(df), PieColors = ['CornflowerBlue', 'OrangeRed'], TableColors = ['Purple','Lavenderblush'])
First off, let's define $X$ and $y$ sets.
# Feature matrix X (all columns but the target) and target vector y.
X = df.drop(columns = [Target])
y = df[Target]
Moreover, high variance for some features can hurt our modeling process. For this reason, we would like to standardize features by removing the mean and scaling to unit variance. In this article, we demonstrated the benefits of scaling data using StandardScaler().
# Standardize every feature to zero mean and unit variance, keeping the
# result as a DataFrame with the original column names.
scaler = preprocessing.StandardScaler()
X_std = pd.DataFrame(data=scaler.fit_transform(X), columns=X.columns)
del scaler
# Compare feature variances before and after standardization as two
# annotated one-row heat-map strips.
fig, ax = plt.subplots(2, 1, figsize=(18, 8))
ax = ax.ravel()
font = FontProperties()
font.set_weight('bold')
CP = [sns.color_palette("OrRd", 20), sns.color_palette("Greens", 20)]
Names = ['Variance of the Features', 'Variance of the Features (Standardized)']
Sets = [X, X_std]
kws = dict(label='Feature\nVariance', aspect=20, shrink= .3)
for i in range(len(ax)):
    # One-row frame of variances, sorted descending, for the heat-map strip.
    Temp = Sets[i].var().sort_values(ascending = False).to_frame(name= 'Variance').round(2).T
    _ = sns.heatmap(Temp, ax=ax[i], annot=True, square=True, cmap = CP[i],
                    linewidths = 0.8, vmin=0, vmax=Temp.max(axis =1)[0], annot_kws={"size": 6},
                    cbar_kws=kws)
    _ = ax[i].set_yticklabels('')
    _ = ax[i].set_title(Names[i], fontproperties=font, fontsize = 16)
del Temp
# From here on, use the standardized features as X.
X = X_std.copy()
del CP, Names, ax, fig, font, Sets, kws,
Now, let's take a look at the correlation of the features with the target.
# One-row heat map of each feature's correlation with the target class,
# sorted ascending.
fig, ax = plt.subplots(figsize=(17,20))
Temp = pd.concat([X, y], axis = 1)
Temp = Temp.corr().round(2)
# Keep only the target row, drop the self-correlation, sort by value.
Temp = Temp.loc[(Temp.index == Target)].drop(columns = Target).T.sort_values(by = Target).T
_ = sns.heatmap(Temp, ax=ax, annot=True, square=True, cmap =sns.color_palette("Greens", n_colors=10),
                linewidths = 0.8, vmin=0, vmax=1,
                annot_kws={"size": 12},
                cbar_kws={'label': Target + ' Correlation', "aspect":40, "shrink": .4, "orientation": "horizontal"})
_ = ax.set_yticklabels('')
del Temp
Modifying dataset.
# Overwrite the sampled frame's features with their standardized values
# and persist the result next to the original file (Data/creditcard_STD.csv).
df[X.columns.tolist()] = X_std[X.columns.tolist()]
df.to_csv (Path.split(".")[0]+'_STD.csv', index = None, header=True)
StratifiedShuffleSplit is a variation of shuffle-split which returns stratified splits: each set contains approximately the same percentage of samples of each target class as the complete set.
# Stratified 70/30 train-test split: preserves the (highly skewed) class
# proportions in both subsets.
Test_Size = 0.3
sss = StratifiedShuffleSplit(n_splits=1, test_size=Test_Size, random_state=42)
_ = sss.get_n_splits(X, y)
# n_splits=1, so the loop runs once and binds the four split frames.
for train_index, test_index in sss.split(X, y):
    X_train, X_test = X.loc[train_index], X.loc[test_index]
    y_train, y_test = y[train_index], y[test_index]
del sss
# Donut charts comparing the class balance of the train and test splits —
# stratification should keep the fraud ratio identical in both.
Colors = ['SeaGreen', 'FireBrick']
nc = 2
fig = make_subplots(rows=1, cols=nc, specs=[[{'type':'domain'}]*nc])
fig.add_trace(go.Pie(labels=Labels,
                     values=y_train.value_counts().values,
                     pull=[0, 0.1],
                     name= 'Train Set',
                     textfont=dict(size=16),
                     marker= dict(colors = Colors, line=dict(color='black', width=1))), 1, 1)
fig.add_trace(go.Pie(labels=Labels,
                     values=y_test.value_counts().values,
                     pull=[0, 0.1],
                     name= 'Test Set',
                     textfont=dict(size=16),
                     marker= dict(colors = Colors, line=dict(color='black', width=1))), 1, 2)
fig.update_traces(hole=.5)
fig.update_layout(height = 400, legend=dict(orientation="v"),
                  legend_title_text= Target,
                  annotations=[dict(text= '<b>' + 'Train<br>Set' + '<b>', x=0.195, y=0.5, font_size=14, showarrow=False),
                               dict(text= '<b>' + 'Test<br>Set' + '<b>', x=0.8, y=0.5, font_size=14, showarrow=False)],
                  title={'text': '<b>' + Target + '<b>', 'x':0.48, 'y': .83, 'xanchor': 'center', 'yanchor': 'top'})
fig.show()
This model optimizes the log-loss function using LBFGS or stochastic gradient descent. See sklearn.neural_network.MLPClassifier.
def Header(Text, L = 100, C = 'Blue', T = 'White'):
    """Print Text on a colored banner, padded with '=' to width L.

    C selects the background color of the text and the color of the '='
    filler; T selects the text (foreground) color. Uses colorama codes.
    """
    backgrounds = {'Black': Back.BLACK, 'Red': Back.RED, 'Green': Back.GREEN,
                   'Yellow': Back.YELLOW, 'Blue': Back.BLUE,
                   'Magenta': Back.MAGENTA, 'Cyan': Back.CYAN}
    foregrounds = {'Black': Fore.BLACK, 'Red': Fore.RED, 'Green': Fore.GREEN,
                   'Yellow': Fore.YELLOW, 'Blue': Fore.BLUE,
                   'Magenta': Fore.MAGENTA, 'Cyan': Fore.CYAN, 'White': Fore.WHITE}
    banner = backgrounds[C] + foregrounds[T] + Style.NORMAL + Text + Style.RESET_ALL
    filler = foregrounds[C] + Style.NORMAL + (L - len(Text) - 1) * '=' + Style.RESET_ALL
    print(banner + ' ' + filler)
def Line(L=100, C = 'Blue'):
    """Print a horizontal rule of L '=' characters in color C (colorama)."""
    palette = {'Black': Fore.BLACK, 'Red': Fore.RED, 'Green': Fore.GREEN,
               'Yellow': Fore.YELLOW, 'Blue': Fore.BLUE,
               'Magenta': Fore.MAGENTA, 'Cyan': Fore.CYAN, 'White': Fore.WHITE}
    print(palette[C] + Style.NORMAL + '=' * L + Style.RESET_ALL)
def Scoring(model, X, y, n_splits = 20, RS = 42):
    """Cross-validated ROC-AUC and balanced-accuracy scores.

    Parameters
    ----------
    model : estimator to evaluate (refit on each fold by cross_val_score).
    X, y : data to evaluate on.
    n_splits : number of shuffled KFold splits.
    RS : random seed for the fold shuffling.

    Returns
    -------
    (ROC, bACC) : per-fold score arrays with NaN folds removed.
    """
    kfold = KFold(n_splits= n_splits, random_state= RS, shuffle = True)
    ROC = cross_val_score(model, X, y, cv=kfold, scoring = 'roc_auc')
    bACC = cross_val_score(model, X, y, cv=kfold, scoring = 'balanced_accuracy')
    ROC = ROC[np.logical_not(np.isnan(ROC))]
    # BUG FIX: the filtered array was previously discarded (the expression
    # was never assigned), so NaN folds leaked into the returned bACC.
    bACC = bACC[np.logical_not(np.isnan(bACC))]
    return ROC, bACC
def Performance_Table(model, X_train = X_train, y_train = y_train, X_test = X_test, y_test = y_test):
    """Tabulate mean ± std ROC-AUC and balanced accuracy on train and test sets.

    Returns a two-row DataFrame (one row per set) of formatted strings.
    """
    Cols = ['Set', 'ROC Accuracy', 'Balanced Accuracy']
    rows = []
    for set_name, X_set, y_set in (('Train Set', X_train, y_train),
                                   ('Test Set', X_test, y_test)):
        roc, bacc = Scoring(model, X = X_set, y = y_set)
        rows.append(pd.DataFrame(
            data = [set_name,
                    ('%.4f' % roc.mean())+ ' ± ' + ('%.4f' % roc.std()),
                    ('%.4f' % bacc.mean())+ ' ± ' + ('%.4f' % bacc.std())],
            index = Cols).T)
    return pd.concat(rows).reset_index(drop = True)
def Classification_Report_CV(model, X, y, n_splits = 20, CM_method = 'Sum'):
    """Cross-validated classification report and aggregated confusion matrix.

    Runs a StratifiedShuffleSplit CV and collects the per-fold
    classification report and confusion matrix through a side-effect scorer.

    Parameters
    ----------
    model : estimator (refit on each fold by cross_val_score).
    X, y : data to evaluate on.
    n_splits : number of CV splits.
    CM_method : 'Sum' to add the per-fold confusion matrices,
        anything else to average them.

    Returns
    -------
    (CM, Reports) : aggregated integer confusion matrix and a DataFrame of
        'mean ± std' strings for each classification-report cell.
    """
    Reports = []
    CM = []
    def classification_report_with_accuracy_score(y_true, y_pred):
        # Side-effect scorer: stash the per-fold report and confusion
        # matrix, then return plain accuracy so cross_val_score is happy.
        Reports.append(pd.DataFrame(metrics.classification_report(y_true, y_pred,
                                    target_names = Labels, output_dict = True)).T.values)
        CM.append(metrics.confusion_matrix(y_true, y_pred))
        return metrics.accuracy_score(y_true, y_pred)
    cross_val_score(model, X=X, y=y, cv=StratifiedShuffleSplit(n_splits=n_splits, random_state=42),
                    scoring=metrics.make_scorer(classification_report_with_accuracy_score))
    # Stack the flattened per-fold reports/CMs into 2-D arrays (one row per fold).
    Reports_All = np.vstack([r.ravel() for r in Reports])
    CM_All = np.vstack([c.ravel() for c in CM])
    # Template report — only its shape, index and columns are used.
    # FIX: built from the passed X/y instead of the module-level train set.
    R = pd.DataFrame(metrics.classification_report(y, model.predict(X),
                     target_names = Labels, output_dict = True)).T
    Mean = pd.DataFrame(Reports_All.mean(axis = 0).reshape(R.shape), index = R.index, columns = R.columns)
    STD = pd.DataFrame(Reports_All.std(axis = 0).reshape(R.shape), index = R.index, columns = R.columns)
    Reports = Mean.applymap(lambda x: ('%.4f' % x))+ ' ± ' +STD.applymap(lambda x: ('%.4f' % x))
    # BUG FIX: the branches were inverted — 'Sum' previously *averaged*
    # the fold matrices while the fallback summed them.
    if CM_method == 'Sum':
        CM = CM_All.sum(axis = 0).reshape(CM[0].shape).round(0).astype(int)
    else:
        CM = CM_All.mean(axis = 0).reshape(CM[0].shape).round(0).astype(int)
    Reports.index.name = 'CV = % i' % n_splits
    return CM, Reports
def Confusion_Mat(CM_Train, CM_Test, n_splits = 20):
    """Plot raw and row-normalized confusion matrices for train and test sets.

    Parameters
    ----------
    CM_Train, CM_Test : aggregated 2x2 confusion matrices.
    n_splits : CV fold count, shown in the figure titles only.
    """
    # Font
    font = FontProperties()
    font.set_weight('bold')
    Titles = ['Train Set (CV = % i)' % n_splits, 'Test Set (CV = % i)' % n_splits]
    CM = [CM_Train, CM_Test]
    for i in range(2):
        fig, ax = plt.subplots(1, 2, figsize=(12, 4))
        fig.suptitle(Titles[i], fontproperties=font, fontsize = 16)
        _ = sns.heatmap(CM[i], annot=True, annot_kws={"size": 14}, cmap="Blues", ax = ax[0],
                        linewidths = 0.2, cbar_kws={"shrink": 1})
        _ = ax[0].set_title('Confusion Matrix');
        # Normalize each row by its true-label total for the second panel.
        _ = sns.heatmap(CM[i].astype('float') / CM[i].sum(axis=1)[:, np.newaxis],
                        annot=True, annot_kws={"size": 14}, cmap="Greens", ax = ax[1],
                        linewidths = 0.2, vmin=0, vmax=1, cbar_kws={"shrink": 1})
        _ = ax[1].set_title('Normalized Confusion Matrix');
        for a in ax:
            _ = a.set_xlabel('Predicted labels')
            _ = a.set_ylabel('True labels');
            _ = a.xaxis.set_ticklabels(Labels)
            _ = a.yaxis.set_ticklabels(Labels)
            _ = a.set_aspect(1)
def Tables_and_Plots(model, X_train = X_train, y_train = y_train, X_test = X_test, y_test = y_test, n_splits = 20):
    """Display the full evaluation suite for a model: the performance
    table, per-set CV classification reports, and the train/test
    confusion-matrix figures."""
    display(Performance_Table(model).style.hide_index())
    matrices = {}
    for title, colour, X_set, y_set in (('Train Set', 'Green', X_train, y_train),
                                        ('Test Set', 'Red', X_test, y_test)):
        Header(title, C = colour)
        matrices[title], report = Classification_Report_CV(model, X=X_set, y=y_set, n_splits = n_splits)
        display(report)
    Line()
    Confusion_Mat(matrices['Train Set'], matrices['Test Set'], n_splits = n_splits)
def Grid_Table(grid):
    """Flatten a fitted search's cv_results_ into a rank-sorted table.

    Parameters
    ----------
    grid : fitted GridSearchCV / RandomizedSearchCV (anything exposing
        cv_results_).

    Returns
    -------
    pd.DataFrame indexed by rank_test_score with columns 'params',
    'mean_test_score' and 'mean_fit_time', rounded to 4 decimals.
    """
    # Render each parameter dict as a plain 'key: value' string.
    param_strings = []
    for params in grid.cv_results_['params']:
        text = str(params)
        for ch in ("{", "}", "'"):
            text = text.replace(ch, '')
        param_strings.append(text)
    Table = pd.DataFrame({'rank_test_score': grid.cv_results_['rank_test_score'],
                          'params': param_strings,
                          'mean_test_score': grid.cv_results_['mean_test_score'],
                          'mean_fit_time': grid.cv_results_['mean_fit_time']})
    return Table.round(4).sort_values('rank_test_score').set_index('rank_test_score')
def Grid_Performance_Plot(Table):
    """Plot mean test score and mean fit time per parameter combination.

    Parameters
    ----------
    Table : pd.DataFrame
        Frame returned by Grid_Table (columns 'params', 'mean_test_score',
        'mean_fit_time').
    """
    font = FontProperties()
    font.set_weight('bold')
    fig, axes = plt.subplots(1, 2, figsize=(16, 6))
    Z = zip(axes, ['mean_test_score', 'mean_fit_time'], ['Blue', 'Red'],
            ['Classification Accuracy', 'Fit Time (with caching)'])
    for ax, col, c, title in Z:
        # NOTE(review): yerr equals the plotted value itself, so each error
        # bar spans the full value — presumably intentional styling; confirm.
        _ = ax.errorbar(x = Table['params'], y = Table[col], yerr = Table[col], color = c)
        _ = ax.set_xticklabels(labels = Table['params'],rotation=90, fontsize = 10)
        _ = ax.set_ylim(bottom = 0)
        # FIX: axis-label typo 'Paramerers' -> 'Parameters'.
        _ = ax.set_xlabel('Parameters')
        _ = ax.set_title(title, fontproperties=font, fontsize = 14)
def Best_Parm(model, param_dist, Top = None,
              X_train = X_train, y_train= y_train, X_test = X_test, y_test = y_test):
    """Randomized hyper-parameter search; display summary tables and plots.

    Parameters
    ----------
    model : estimator to tune.
    param_dist : dict of parameter lists/distributions for RandomizedSearchCV.
    Top : int or None
        Number of result rows to display (None = all rows).

    Returns
    -------
    The fitted RandomizedSearchCV object.
    """
    grid = RandomizedSearchCV(estimator = model, param_distributions = param_dist,
                              cv = KFold(n_splits = 20, shuffle = True),
                              n_iter = int(1e3),
                              scoring = 'precision',
                              error_score = 0,
                              verbose = 0,
                              n_jobs = 10,
                              return_train_score = True)
    _ = grid.fit(X_train, y_train)
    # FIX: displayed column-label typo 'Best Paramerers' -> 'Best Parameters'.
    display(pd.DataFrame({'Best Score': [grid.best_score_],
                          'Best Parameters': [str(grid.best_params_)],
                          'Precision': [grid.score(X_test,y_test)]}).round(4).style.hide_index().set_precision(4))
    Table = Grid_Table(grid)
    # FIX: identity comparison with None (PEP 8), not '=='.
    if Top is None:
        Top = Table.shape[0]
    display(Table.reset_index(drop = False).head(Top).style.hide_index().\
            set_precision(4).background_gradient(subset= ['mean_test_score'], cmap='Greens').\
            background_gradient(subset= ['mean_fit_time'], cmap='Oranges'))
    Grid_Performance_Plot(Table)
    return grid
Some of the metrics that we use here to measure the accuracy: \begin{align} \text{Confusion Matrix} = \begin{bmatrix}T_p & F_p\\ F_n & T_n\end{bmatrix}. \end{align}
where $T_p$, $T_n$, $F_p$, and $F_n$ represent true positive, true negative, false positive, and false negative, respectively.
\begin{align} \text{Precision} &= \frac{T_{p}}{T_{p} + F_{p}},\\ \text{Recall} &= \frac{T_{p}}{T_{p} + F_{n}},\\ \text{F1} &= \frac{2 \times \text{Precision} \times \text{Recall}}{\text{Precision} + \text{Recall}}\\ \text{Balanced-Accuracy (bACC)} &= \frac{1}{2}\left( \frac{T_{p}}{T_{p} + F_{n}} + \frac{T_{n}}{T_{n} + F_{p}}\right ) \end{align}The accuracy can be a misleading metric for imbalanced data sets. In these cases, a balanced accuracy (bACC) [6] is recommended that normalizes true positive and true negative predictions by the number of positive and negative samples, respectively, and divides their sum by two.
# Baseline: MLP classifier with scikit-learn defaults (only max_iter
# raised so the solver has room to converge), evaluated with 20-fold CV.
Header('MLP with Default Parameters')
MLP = MLPClassifier(max_iter = 1000)
print('Default Parameters = %s' % MLP.get_params(deep=True))
_ = MLP.fit(X_train, y_train)
Tables_and_Plots(MLP, n_splits = 20)
MLP with Default Parameters ======================================================================== Default Parameters = {'activation': 'relu', 'alpha': 0.0001, 'batch_size': 'auto', 'beta_1': 0.9, 'beta_2': 0.999, 'early_stopping': False, 'epsilon': 1e-08, 'hidden_layer_sizes': (100,), 'learning_rate': 'constant', 'learning_rate_init': 0.001, 'max_fun': 15000, 'max_iter': 1000, 'momentum': 0.9, 'n_iter_no_change': 10, 'nesterovs_momentum': True, 'power_t': 0.5, 'random_state': None, 'shuffle': True, 'solver': 'adam', 'tol': 0.0001, 'validation_fraction': 0.1, 'verbose': False, 'warm_start': False}
| Set | ROC Accuracy | Balanced Accuracy |
|---|---|---|
| Train Set | 0.9100 ± 0.1484 | 0.8579 ± 0.1826 |
| Test Set | 0.9377 ± 0.1662 | 0.9000 ± 0.2000 |
Train Set ==========================================================================================
| precision | recall | f1-score | support | |
|---|---|---|---|---|
| CV = 20 | ||||
| Normal | 0.9994 ± 0.0004 | 0.9999 ± 0.0002 | 0.9996 ± 0.0003 | 1991.0000 ± 0.0000 |
| Fraud | 0.8875 ± 0.2559 | 0.5833 ± 0.2764 | 0.6779 ± 0.2485 | 3.0000 ± 0.0000 |
| accuracy | 0.9993 ± 0.0005 | 0.9993 ± 0.0005 | 0.9993 ± 0.0005 | 0.9993 ± 0.0005 |
| macro avg | 0.9434 ± 0.1280 | 0.7916 ± 0.1382 | 0.8387 ± 0.1244 | 1994.0000 ± 0.0000 |
| weighted avg | 0.9992 ± 0.0007 | 0.9993 ± 0.0005 | 0.9992 ± 0.0006 | 1994.0000 ± 0.0000 |
Test Set ===========================================================================================
| precision | recall | f1-score | support | |
|---|---|---|---|---|
| CV = 20 | ||||
| Normal | 0.9991 ± 0.0007 | 1.0000 ± 0.0000 | 0.9996 ± 0.0004 | 853.0000 ± 0.0000 |
| Fraud | 0.9000 ± 0.3000 | 0.6250 ± 0.3112 | 0.7167 ± 0.2843 | 2.0000 ± 0.0000 |
| accuracy | 0.9991 ± 0.0007 | 0.9991 ± 0.0007 | 0.9991 ± 0.0007 | 0.9991 ± 0.0007 |
| macro avg | 0.9496 ± 0.1502 | 0.8125 ± 0.1556 | 0.8581 ± 0.1423 | 855.0000 ± 0.0000 |
| weighted avg | 0.9989 ± 0.0013 | 0.9991 ± 0.0007 | 0.9989 ± 0.0010 | 855.0000 ± 0.0000 |
====================================================================================================
# Re-evaluate the same fitted model with 30 CV splits for comparison.
Tables_and_Plots(MLP, n_splits = 30)
| Set | ROC Accuracy | Balanced Accuracy |
|---|---|---|
| Train Set | 0.9134 ± 0.1424 | 0.8528 ± 0.1881 |
| Test Set | 0.9388 ± 0.1657 | 0.8938 ± 0.1987 |
Train Set ==========================================================================================
| precision | recall | f1-score | support | |
|---|---|---|---|---|
| CV = 30 | ||||
| Normal | 0.9994 ± 0.0004 | 0.9999 ± 0.0002 | 0.9997 ± 0.0003 | 1991.0000 ± 0.0000 |
| Fraud | 0.8889 ± 0.2656 | 0.6222 ± 0.2820 | 0.7060 ± 0.2543 | 3.0000 ± 0.0000 |
| accuracy | 0.9993 ± 0.0005 | 0.9993 ± 0.0005 | 0.9993 ± 0.0005 | 0.9993 ± 0.0005 |
| macro avg | 0.9442 ± 0.1329 | 0.8111 ± 0.1410 | 0.8528 ± 0.1273 | 1994.0000 ± 0.0000 |
| weighted avg | 0.9993 ± 0.0007 | 0.9993 ± 0.0005 | 0.9992 ± 0.0006 | 1994.0000 ± 0.0000 |
Test Set ===========================================================================================
| precision | recall | f1-score | support | |
|---|---|---|---|---|
| CV = 30 | ||||
| Normal | 0.9989 ± 0.0008 | 0.9999 ± 0.0005 | 0.9994 ± 0.0004 | 853.0000 ± 0.0000 |
| Fraud | 0.7667 ± 0.4028 | 0.5333 ± 0.3399 | 0.6056 ± 0.3347 | 2.0000 ± 0.0000 |
| accuracy | 0.9988 ± 0.0008 | 0.9988 ± 0.0008 | 0.9988 ± 0.0008 | 0.9988 ± 0.0008 |
| macro avg | 0.8828 ± 0.2017 | 0.7666 ± 0.1699 | 0.8025 ± 0.1675 | 855.0000 ± 0.0000 |
| weighted avg | 0.9984 ± 0.0016 | 0.9988 ± 0.0008 | 0.9985 ± 0.0012 | 855.0000 ± 0.0000 |
====================================================================================================
# Randomized hyper-parameter search over solver, L2 penalty (alpha in
# {0.1, 0.01, 0.001}) and learning-rate schedule.
param_dist = {'solver': ['lbfgs', 'sgd', 'adam'],
              'alpha': [10.0**x for x in np.arange(-1,-4,-1)],
              'learning_rate' : ['constant', 'invscaling', 'adaptive']}
Header('MLP with the Best Parameters')
grid_model = Best_Parm(model = MLP, param_dist = param_dist)
MLP with the Best Parameters =======================================================================
| Best Score | Best Paramerers | Precision |
|---|---|---|
| 0.7500 | {'solver': 'adam', 'learning_rate': 'invscaling', 'alpha': 0.001} | 1.0000 |
| rank_test_score | params | mean_test_score | mean_fit_time |
|---|---|---|---|
| 1 | solver: adam, learning_rate: invscaling, alpha: 0.001 | 0.7500 | 3.8384 |
| 2 | solver: adam, learning_rate: invscaling, alpha: 0.1 | 0.7250 | 4.5908 |
| 2 | solver: lbfgs, learning_rate: adaptive, alpha: 0.1 | 0.7250 | 2.8548 |
| 4 | solver: adam, learning_rate: constant, alpha: 0.001 | 0.7000 | 3.8623 |
| 4 | solver: adam, learning_rate: invscaling, alpha: 0.01 | 0.7000 | 4.4159 |
| 6 | solver: lbfgs, learning_rate: constant, alpha: 0.1 | 0.6833 | 3.2999 |
| 7 | solver: adam, learning_rate: adaptive, alpha: 0.01 | 0.6750 | 4.5132 |
| 7 | solver: adam, learning_rate: adaptive, alpha: 0.001 | 0.6750 | 3.5510 |
| 9 | solver: lbfgs, learning_rate: invscaling, alpha: 0.1 | 0.6583 | 2.8100 |
| 9 | solver: lbfgs, learning_rate: invscaling, alpha: 0.01 | 0.6583 | 2.9163 |
| 11 | solver: lbfgs, learning_rate: adaptive, alpha: 0.001 | 0.6500 | 2.8919 |
| 12 | solver: lbfgs, learning_rate: constant, alpha: 0.01 | 0.6333 | 2.7863 |
| 13 | solver: lbfgs, learning_rate: adaptive, alpha: 0.01 | 0.6250 | 2.7095 |
| 14 | solver: lbfgs, learning_rate: invscaling, alpha: 0.001 | 0.6083 | 2.8218 |
| 15 | solver: adam, learning_rate: constant, alpha: 0.1 | 0.5750 | 4.7089 |
| 15 | solver: adam, learning_rate: adaptive, alpha: 0.1 | 0.5750 | 5.1043 |
| 15 | solver: adam, learning_rate: constant, alpha: 0.01 | 0.5750 | 4.5742 |
| 18 | solver: sgd, learning_rate: constant, alpha: 0.1 | 0.5500 | 4.2863 |
| 19 | solver: lbfgs, learning_rate: constant, alpha: 0.001 | 0.5483 | 2.8562 |
| 20 | solver: sgd, learning_rate: constant, alpha: 0.01 | 0.5250 | 4.3245 |
| 21 | solver: sgd, learning_rate: adaptive, alpha: 0.01 | 0.5000 | 8.3043 |
| 22 | solver: sgd, learning_rate: constant, alpha: 0.001 | 0.4750 | 4.2662 |
| 23 | solver: sgd, learning_rate: adaptive, alpha: 0.001 | 0.4500 | 8.5791 |
| 24 | solver: sgd, learning_rate: adaptive, alpha: 0.1 | 0.4250 | 8.4027 |
| 25 | solver: sgd, learning_rate: invscaling, alpha: 0.001 | 0.0000 | 4.7036 |
| 25 | solver: sgd, learning_rate: invscaling, alpha: 0.1 | 0.0000 | 4.6630 |
| 25 | solver: sgd, learning_rate: invscaling, alpha: 0.01 | 0.0000 | 4.6423 |
Since we have identified the best parameters for our modeling, we train another model using these parameters.
# Retrain the MLP with the best hyper-parameters found by the randomized
# search, then run the same 20-fold evaluation suite.
Header('MLP with the Best Parameters')
MLP = MLPClassifier(max_iter = 1000, alpha = grid_model.best_params_['alpha'],
                    learning_rate = grid_model.best_params_['learning_rate'],
                    solver = grid_model.best_params_['solver'],
                    verbose= True)
print('Best Parameters = %s' % MLP.get_params(deep=True))
_ = MLP.fit(X_train, y_train)
Tables_and_Plots(MLP, n_splits = 20)
MLP with the Best Parameters ======================================================================= Best Parameters = {'activation': 'relu', 'alpha': 0.001, 'batch_size': 'auto', 'beta_1': 0.9, 'beta_2': 0.999, 'early_stopping': False, 'epsilon': 1e-08, 'hidden_layer_sizes': (100,), 'learning_rate': 'invscaling', 'learning_rate_init': 0.001, 'max_fun': 15000, 'max_iter': 1000, 'momentum': 0.9, 'n_iter_no_change': 10, 'nesterovs_momentum': True, 'power_t': 0.5, 'random_state': None, 'shuffle': True, 'solver': 'adam', 'tol': 0.0001, 'validation_fraction': 0.1, 'verbose': True, 'warm_start': False} Iteration 1, loss = 0.35589392 Iteration 2, loss = 0.05258751 Iteration 3, loss = 0.02221151 Iteration 4, loss = 0.01366261 Iteration 5, loss = 0.00998538 Iteration 6, loss = 0.00792757 Iteration 7, loss = 0.00675078 Iteration 8, loss = 0.00589274 Iteration 9, loss = 0.00517375 Iteration 10, loss = 0.00468163 Iteration 11, loss = 0.00427851 Iteration 12, loss = 0.00395347 Iteration 13, loss = 0.00365398 Iteration 14, loss = 0.00343790 Iteration 15, loss = 0.00319606 Iteration 16, loss = 0.00296092 Iteration 17, loss = 0.00292615 Iteration 18, loss = 0.00261165 Iteration 19, loss = 0.00260920 Iteration 20, loss = 0.00252948 Iteration 21, loss = 0.00226260 Iteration 22, loss = 0.00225923 Iteration 23, loss = 0.00205607 Iteration 24, loss = 0.00206672 Iteration 25, loss = 0.00189286 Iteration 26, loss = 0.00191885 Iteration 27, loss = 0.00182960 Iteration 28, loss = 0.00183673 Iteration 29, loss = 0.00169863 Iteration 30, loss = 0.00168482 Iteration 31, loss = 0.00158500 Iteration 32, loss = 0.00154374 Iteration 33, loss = 0.00142155 Iteration 34, loss = 0.00143501 Iteration 35, loss = 0.00133662 Iteration 36, loss = 0.00135999 Iteration 37, loss = 0.00129457 Iteration 38, loss = 0.00127094 Iteration 39, loss = 0.00117880 Iteration 40, loss = 0.00120347 Iteration 41, loss = 0.00124829 Iteration 42, loss = 0.00117618 Iteration 43, loss = 0.00107198 Iteration 44, loss = 
0.00102603 Iteration 45, loss = 0.00100391 Iteration 46, loss = 0.00100060 Iteration 47, loss = 0.00100871 Iteration 48, loss = 0.00093701 Iteration 49, loss = 0.00089416 Iteration 50, loss = 0.00090504 Iteration 51, loss = 0.00085654 Iteration 52, loss = 0.00083982 Iteration 53, loss = 0.00085875 Iteration 54, loss = 0.00082907 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. Iteration 1, loss = 0.25627896 Iteration 2, loss = 0.03888364 Iteration 3, loss = 0.01704800 Iteration 4, loss = 0.01059528 Iteration 5, loss = 0.00777191 Iteration 6, loss = 0.00629769 Iteration 7, loss = 0.00537026 Iteration 8, loss = 0.00471320 Iteration 9, loss = 0.00417622 Iteration 10, loss = 0.00372886 Iteration 11, loss = 0.00359520 Iteration 12, loss = 0.00324340 Iteration 13, loss = 0.00298081 Iteration 14, loss = 0.00292443 Iteration 15, loss = 0.00274733 Iteration 16, loss = 0.00255253 Iteration 17, loss = 0.00243380 Iteration 18, loss = 0.00234783 Iteration 19, loss = 0.00221611 Iteration 20, loss = 0.00213715 Iteration 21, loss = 0.00200058 Iteration 22, loss = 0.00192478 Iteration 23, loss = 0.00186506 Iteration 24, loss = 0.00184124 Iteration 25, loss = 0.00188396 Iteration 26, loss = 0.00170328 Iteration 27, loss = 0.00156424 Iteration 28, loss = 0.00167449 Iteration 29, loss = 0.00145335 Iteration 30, loss = 0.00138375 Iteration 31, loss = 0.00144885 Iteration 32, loss = 0.00128540 Iteration 33, loss = 0.00134807 Iteration 34, loss = 0.00121068 Iteration 35, loss = 0.00118334 Iteration 36, loss = 0.00112537 Iteration 37, loss = 0.00110133 Iteration 38, loss = 0.00105151 Iteration 39, loss = 0.00110683 Iteration 40, loss = 0.00099571 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.27634447 Iteration 2, loss = 0.04025315 Iteration 3, loss = 0.01804640 Iteration 4, loss = 0.01145670 Iteration 5, loss = 0.00864963 Iteration 6, loss = 0.00695813 Iteration 7, loss = 0.00597440 Iteration 8, loss = 0.00526248 Iteration 9, loss = 0.00493908 Iteration 10, loss = 0.00434116 Iteration 11, loss = 0.00398109 Iteration 12, loss = 0.00368435 Iteration 13, loss = 0.00344394 Iteration 14, loss = 0.00323928 Iteration 15, loss = 0.00317846 Iteration 16, loss = 0.00286165 Iteration 17, loss = 0.00283276 Iteration 18, loss = 0.00261930 Iteration 19, loss = 0.00237351 Iteration 20, loss = 0.00245855 Iteration 21, loss = 0.00227870 Iteration 22, loss = 0.00225795 Iteration 23, loss = 0.00208721 Iteration 24, loss = 0.00200502 Iteration 25, loss = 0.00205784 Iteration 26, loss = 0.00173599 Iteration 27, loss = 0.00177620 Iteration 28, loss = 0.00178384 Iteration 29, loss = 0.00185338 Iteration 30, loss = 0.00176369 Iteration 31, loss = 0.00164165 Iteration 32, loss = 0.00155549 Iteration 33, loss = 0.00143641 Iteration 34, loss = 0.00141658 Iteration 35, loss = 0.00133084 Iteration 36, loss = 0.00132719 Iteration 37, loss = 0.00132909 Iteration 38, loss = 0.00123476 Iteration 39, loss = 0.00119319 Iteration 40, loss = 0.00123823 Iteration 41, loss = 0.00116348 Iteration 42, loss = 0.00116352 Iteration 43, loss = 0.00109505 Iteration 44, loss = 0.00114758 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.24111855 Iteration 2, loss = 0.03722662 Iteration 3, loss = 0.01724225 Iteration 4, loss = 0.01117245 Iteration 5, loss = 0.00845245 Iteration 6, loss = 0.00696422 Iteration 7, loss = 0.00590166 Iteration 8, loss = 0.00523699 Iteration 9, loss = 0.00461968 Iteration 10, loss = 0.00421660 Iteration 11, loss = 0.00379322 Iteration 12, loss = 0.00347448 Iteration 13, loss = 0.00323200 Iteration 14, loss = 0.00311543 Iteration 15, loss = 0.00281694 Iteration 16, loss = 0.00261576 Iteration 17, loss = 0.00253301 Iteration 18, loss = 0.00239984 Iteration 19, loss = 0.00227240 Iteration 20, loss = 0.00223879 Iteration 21, loss = 0.00200770 Iteration 22, loss = 0.00190321 Iteration 23, loss = 0.00204472 Iteration 24, loss = 0.00175129 Iteration 25, loss = 0.00167842 Iteration 26, loss = 0.00162394 Iteration 27, loss = 0.00158130 Iteration 28, loss = 0.00156825 Iteration 29, loss = 0.00174966 Iteration 30, loss = 0.00143203 Iteration 31, loss = 0.00135471 Iteration 32, loss = 0.00129932 Iteration 33, loss = 0.00122347 Iteration 34, loss = 0.00143081 Iteration 35, loss = 0.00160660 Iteration 36, loss = 0.00128848 Iteration 37, loss = 0.00115227 Iteration 38, loss = 0.00106881 Iteration 39, loss = 0.00108358 Iteration 40, loss = 0.00100122 Iteration 41, loss = 0.00103491 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.17481962 Iteration 2, loss = 0.02772310 Iteration 3, loss = 0.01379826 Iteration 4, loss = 0.00933654 Iteration 5, loss = 0.00721451 Iteration 6, loss = 0.00599685 Iteration 7, loss = 0.00508818 Iteration 8, loss = 0.00454729 Iteration 9, loss = 0.00397084 Iteration 10, loss = 0.00367470 Iteration 11, loss = 0.00338085 Iteration 12, loss = 0.00306326 Iteration 13, loss = 0.00284976 Iteration 14, loss = 0.00265801 Iteration 15, loss = 0.00248527 Iteration 16, loss = 0.00223887 Iteration 17, loss = 0.00224258 Iteration 18, loss = 0.00205273 Iteration 19, loss = 0.00199054 Iteration 20, loss = 0.00193374 Iteration 21, loss = 0.00188218 Iteration 22, loss = 0.00177134 Iteration 23, loss = 0.00165354 Iteration 24, loss = 0.00165984 Iteration 25, loss = 0.00152025 Iteration 26, loss = 0.00151833 Iteration 27, loss = 0.00143659 Iteration 28, loss = 0.00139623 Iteration 29, loss = 0.00138890 Iteration 30, loss = 0.00136712 Iteration 31, loss = 0.00124739 Iteration 32, loss = 0.00128905 Iteration 33, loss = 0.00121225 Iteration 34, loss = 0.00118119 Iteration 35, loss = 0.00111116 Iteration 36, loss = 0.00104089 Iteration 37, loss = 0.00099395 Iteration 38, loss = 0.00101501 Iteration 39, loss = 0.00104957 Iteration 40, loss = 0.00090133 Iteration 41, loss = 0.00088798 Iteration 42, loss = 0.00087640 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.33596679 Iteration 2, loss = 0.05237988 Iteration 3, loss = 0.02236023 Iteration 4, loss = 0.01374428 Iteration 5, loss = 0.01004672 Iteration 6, loss = 0.00802332 Iteration 7, loss = 0.00676913 Iteration 8, loss = 0.00589335 Iteration 9, loss = 0.00525202 Iteration 10, loss = 0.00476774 Iteration 11, loss = 0.00428117 Iteration 12, loss = 0.00407287 Iteration 13, loss = 0.00374333 Iteration 14, loss = 0.00354866 Iteration 15, loss = 0.00335353 Iteration 16, loss = 0.00307505 Iteration 17, loss = 0.00302299 Iteration 18, loss = 0.00280303 Iteration 19, loss = 0.00269668 Iteration 20, loss = 0.00256687 Iteration 21, loss = 0.00241918 Iteration 22, loss = 0.00232223 Iteration 23, loss = 0.00221434 Iteration 24, loss = 0.00216596 Iteration 25, loss = 0.00205708 Iteration 26, loss = 0.00197778 Iteration 27, loss = 0.00190707 Iteration 28, loss = 0.00186112 Iteration 29, loss = 0.00180106 Iteration 30, loss = 0.00170084 Iteration 31, loss = 0.00166228 Iteration 32, loss = 0.00160210 Iteration 33, loss = 0.00150621 Iteration 34, loss = 0.00146307 Iteration 35, loss = 0.00149867 Iteration 36, loss = 0.00142416 Iteration 37, loss = 0.00134745 Iteration 38, loss = 0.00128503 Iteration 39, loss = 0.00124655 Iteration 40, loss = 0.00128026 Iteration 41, loss = 0.00127068 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.17122988 Iteration 2, loss = 0.02750449 Iteration 3, loss = 0.01358705 Iteration 4, loss = 0.00930335 Iteration 5, loss = 0.00721540 Iteration 6, loss = 0.00607428 Iteration 7, loss = 0.00524918 Iteration 8, loss = 0.00467748 Iteration 9, loss = 0.00412476 Iteration 10, loss = 0.00373294 Iteration 11, loss = 0.00346732 Iteration 12, loss = 0.00319458 Iteration 13, loss = 0.00308737 Iteration 14, loss = 0.00276900 Iteration 15, loss = 0.00262364 Iteration 16, loss = 0.00243918 Iteration 17, loss = 0.00239738 Iteration 18, loss = 0.00225024 Iteration 19, loss = 0.00212801 Iteration 20, loss = 0.00202556 Iteration 21, loss = 0.00201080 Iteration 22, loss = 0.00189769 Iteration 23, loss = 0.00175144 Iteration 24, loss = 0.00170833 Iteration 25, loss = 0.00173036 Iteration 26, loss = 0.00164187 Iteration 27, loss = 0.00151443 Iteration 28, loss = 0.00145002 Iteration 29, loss = 0.00139563 Iteration 30, loss = 0.00149036 Iteration 31, loss = 0.00130628 Iteration 32, loss = 0.00124448 Iteration 33, loss = 0.00127669 Iteration 34, loss = 0.00120915 Iteration 35, loss = 0.00116337 Iteration 36, loss = 0.00111440 Iteration 37, loss = 0.00107176 Iteration 38, loss = 0.00108068 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.14013217 Iteration 2, loss = 0.02347695 Iteration 3, loss = 0.01188832 Iteration 4, loss = 0.00819298 Iteration 5, loss = 0.00651749 Iteration 6, loss = 0.00538013 Iteration 7, loss = 0.00486696 Iteration 8, loss = 0.00440601 Iteration 9, loss = 0.00381403 Iteration 10, loss = 0.00362929 Iteration 11, loss = 0.00321180 Iteration 12, loss = 0.00310125 Iteration 13, loss = 0.00299152 Iteration 14, loss = 0.00291960 Iteration 15, loss = 0.00255023 Iteration 16, loss = 0.00239856 Iteration 17, loss = 0.00248432 Iteration 18, loss = 0.00226234 Iteration 19, loss = 0.00205429 Iteration 20, loss = 0.00197066 Iteration 21, loss = 0.00197498 Iteration 22, loss = 0.00179587 Iteration 23, loss = 0.00169734 Iteration 24, loss = 0.00161292 Iteration 25, loss = 0.00157564 Iteration 26, loss = 0.00147374 Iteration 27, loss = 0.00141634 Iteration 28, loss = 0.00139410 Iteration 29, loss = 0.00136754 Iteration 30, loss = 0.00157799 Iteration 31, loss = 0.00123608 Iteration 32, loss = 0.00127631 Iteration 33, loss = 0.00129005 Iteration 34, loss = 0.00126681 Iteration 35, loss = 0.00108384 Iteration 36, loss = 0.00100027 Iteration 37, loss = 0.00097855 Iteration 38, loss = 0.00109271 Iteration 39, loss = 0.00099116 Iteration 40, loss = 0.00093422 Iteration 41, loss = 0.00088090 Iteration 42, loss = 0.00083569 Iteration 43, loss = 0.00082659 Iteration 44, loss = 0.00079312 Iteration 45, loss = 0.00076793 Iteration 46, loss = 0.00085659 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.20869856 Iteration 2, loss = 0.03390500 Iteration 3, loss = 0.01548657 Iteration 4, loss = 0.00974265 Iteration 5, loss = 0.00729190 Iteration 6, loss = 0.00572345 Iteration 7, loss = 0.00470874 Iteration 8, loss = 0.00412729 Iteration 9, loss = 0.00351451 Iteration 10, loss = 0.00315360 Iteration 11, loss = 0.00283158 Iteration 12, loss = 0.00264353 Iteration 13, loss = 0.00223532 Iteration 14, loss = 0.00211869 Iteration 15, loss = 0.00195122 Iteration 16, loss = 0.00176486 Iteration 17, loss = 0.00163013 Iteration 18, loss = 0.00156935 Iteration 19, loss = 0.00148423 Iteration 20, loss = 0.00141028 Iteration 21, loss = 0.00141909 Iteration 22, loss = 0.00124522 Iteration 23, loss = 0.00113114 Iteration 24, loss = 0.00107302 Iteration 25, loss = 0.00111565 Iteration 26, loss = 0.00109686 Iteration 27, loss = 0.00109756 Iteration 28, loss = 0.00092088 Iteration 29, loss = 0.00090961 Iteration 30, loss = 0.00090768 Iteration 31, loss = 0.00084881 Iteration 32, loss = 0.00078559 Iteration 33, loss = 0.00077199 Iteration 34, loss = 0.00078585 Iteration 35, loss = 0.00070550 Iteration 36, loss = 0.00067746 Iteration 37, loss = 0.00065857 Iteration 38, loss = 0.00065758 Iteration 39, loss = 0.00075581 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.29212727 Iteration 2, loss = 0.04496829 Iteration 3, loss = 0.02030840 Iteration 4, loss = 0.01303352 Iteration 5, loss = 0.00973697 Iteration 6, loss = 0.00776181 Iteration 7, loss = 0.00654825 Iteration 8, loss = 0.00573093 Iteration 9, loss = 0.00506911 Iteration 10, loss = 0.00469158 Iteration 11, loss = 0.00428874 Iteration 12, loss = 0.00388722 Iteration 13, loss = 0.00355029 Iteration 14, loss = 0.00334054 Iteration 15, loss = 0.00314040 Iteration 16, loss = 0.00293563 Iteration 17, loss = 0.00284914 Iteration 18, loss = 0.00261855 Iteration 19, loss = 0.00252736 Iteration 20, loss = 0.00238542 Iteration 21, loss = 0.00233538 Iteration 22, loss = 0.00225206 Iteration 23, loss = 0.00206867 Iteration 24, loss = 0.00208307 Iteration 25, loss = 0.00196139 Iteration 26, loss = 0.00186112 Iteration 27, loss = 0.00183927 Iteration 28, loss = 0.00167268 Iteration 29, loss = 0.00166675 Iteration 30, loss = 0.00159960 Iteration 31, loss = 0.00163725 Iteration 32, loss = 0.00152384 Iteration 33, loss = 0.00170268 Iteration 34, loss = 0.00176102 Iteration 35, loss = 0.00153920 Iteration 36, loss = 0.00130490 Iteration 37, loss = 0.00150927 Iteration 38, loss = 0.00121510 Iteration 39, loss = 0.00116168 Iteration 40, loss = 0.00117748 Iteration 41, loss = 0.00113139 Iteration 42, loss = 0.00108179 Iteration 43, loss = 0.00111626 Iteration 44, loss = 0.00101435 Iteration 45, loss = 0.00100210 Iteration 46, loss = 0.00096314 Iteration 47, loss = 0.00091146 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.27402968 Iteration 2, loss = 0.04326730 Iteration 3, loss = 0.02010872 Iteration 4, loss = 0.01283040 Iteration 5, loss = 0.00963594 Iteration 6, loss = 0.00765072 Iteration 7, loss = 0.00654896 Iteration 8, loss = 0.00566495 Iteration 9, loss = 0.00509983 Iteration 10, loss = 0.00446635 Iteration 11, loss = 0.00423469 Iteration 12, loss = 0.00380762 Iteration 13, loss = 0.00361089 Iteration 14, loss = 0.00324120 Iteration 15, loss = 0.00329426 Iteration 16, loss = 0.00304045 Iteration 17, loss = 0.00286072 Iteration 18, loss = 0.00266536 Iteration 19, loss = 0.00266027 Iteration 20, loss = 0.00259405 Iteration 21, loss = 0.00234628 Iteration 22, loss = 0.00218003 Iteration 23, loss = 0.00210687 Iteration 24, loss = 0.00213500 Iteration 25, loss = 0.00195871 Iteration 26, loss = 0.00185220 Iteration 27, loss = 0.00178243 Iteration 28, loss = 0.00173274 Iteration 29, loss = 0.00172976 Iteration 30, loss = 0.00168817 Iteration 31, loss = 0.00150640 Iteration 32, loss = 0.00145626 Iteration 33, loss = 0.00139925 Iteration 34, loss = 0.00140187 Iteration 35, loss = 0.00155982 Iteration 36, loss = 0.00143928 Iteration 37, loss = 0.00140613 Iteration 38, loss = 0.00119222 Iteration 39, loss = 0.00117935 Iteration 40, loss = 0.00114464 Iteration 41, loss = 0.00109062 Iteration 42, loss = 0.00106955 Iteration 43, loss = 0.00106004 Iteration 44, loss = 0.00105380 Iteration 45, loss = 0.00096378 Iteration 46, loss = 0.00095788 Iteration 47, loss = 0.00099292 Iteration 48, loss = 0.00092179 Iteration 49, loss = 0.00089029 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.18263047 Iteration 2, loss = 0.02943248 Iteration 3, loss = 0.01412870 Iteration 4, loss = 0.00963131 Iteration 5, loss = 0.00749458 Iteration 6, loss = 0.00623275 Iteration 7, loss = 0.00539230 Iteration 8, loss = 0.00487889 Iteration 9, loss = 0.00436578 Iteration 10, loss = 0.00397110 Iteration 11, loss = 0.00360593 Iteration 12, loss = 0.00328849 Iteration 13, loss = 0.00300334 Iteration 14, loss = 0.00286206 Iteration 15, loss = 0.00262704 Iteration 16, loss = 0.00246512 Iteration 17, loss = 0.00232175 Iteration 18, loss = 0.00219865 Iteration 19, loss = 0.00206378 Iteration 20, loss = 0.00201614 Iteration 21, loss = 0.00184793 Iteration 22, loss = 0.00177612 Iteration 23, loss = 0.00173271 Iteration 24, loss = 0.00156938 Iteration 25, loss = 0.00152809 Iteration 26, loss = 0.00146498 Iteration 27, loss = 0.00140063 Iteration 28, loss = 0.00132926 Iteration 29, loss = 0.00126963 Iteration 30, loss = 0.00124653 Iteration 31, loss = 0.00118820 Iteration 32, loss = 0.00113131 Iteration 33, loss = 0.00110704 Iteration 34, loss = 0.00104495 Iteration 35, loss = 0.00104595 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.22401205 Iteration 2, loss = 0.03550550 Iteration 3, loss = 0.01653913 Iteration 4, loss = 0.01090494 Iteration 5, loss = 0.00832454 Iteration 6, loss = 0.00683679 Iteration 7, loss = 0.00585800 Iteration 8, loss = 0.00520167 Iteration 9, loss = 0.00472553 Iteration 10, loss = 0.00430480 Iteration 11, loss = 0.00385307 Iteration 12, loss = 0.00371798 Iteration 13, loss = 0.00335621 Iteration 14, loss = 0.00317439 Iteration 15, loss = 0.00287292 Iteration 16, loss = 0.00274488 Iteration 17, loss = 0.00259804 Iteration 18, loss = 0.00255205 Iteration 19, loss = 0.00232075 Iteration 20, loss = 0.00230147 Iteration 21, loss = 0.00218100 Iteration 22, loss = 0.00198697 Iteration 23, loss = 0.00188781 Iteration 24, loss = 0.00183692 Iteration 25, loss = 0.00180691 Iteration 26, loss = 0.00168298 Iteration 27, loss = 0.00165525 Iteration 28, loss = 0.00170915 Iteration 29, loss = 0.00158327 Iteration 30, loss = 0.00144918 Iteration 31, loss = 0.00145195 Iteration 32, loss = 0.00129095 Iteration 33, loss = 0.00148743 Iteration 34, loss = 0.00132789 Iteration 35, loss = 0.00154025 Iteration 36, loss = 0.00123190 Iteration 37, loss = 0.00116116 Iteration 38, loss = 0.00114767 Iteration 39, loss = 0.00110823 Iteration 40, loss = 0.00105189 Iteration 41, loss = 0.00105337 Iteration 42, loss = 0.00104633 Iteration 43, loss = 0.00102350 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.31965576 Iteration 2, loss = 0.04999539 Iteration 3, loss = 0.02233132 Iteration 4, loss = 0.01389874 Iteration 5, loss = 0.01027150 Iteration 6, loss = 0.00818611 Iteration 7, loss = 0.00700952 Iteration 8, loss = 0.00609227 Iteration 9, loss = 0.00539188 Iteration 10, loss = 0.00482226 Iteration 11, loss = 0.00451209 Iteration 12, loss = 0.00429814 Iteration 13, loss = 0.00373817 Iteration 14, loss = 0.00360436 Iteration 15, loss = 0.00336788 Iteration 16, loss = 0.00321301 Iteration 17, loss = 0.00294608 Iteration 18, loss = 0.00294837 Iteration 19, loss = 0.00280924 Iteration 20, loss = 0.00266356 Iteration 21, loss = 0.00248959 Iteration 22, loss = 0.00233001 Iteration 23, loss = 0.00234595 Iteration 24, loss = 0.00227235 Iteration 25, loss = 0.00216885 Iteration 26, loss = 0.00214649 Iteration 27, loss = 0.00191994 Iteration 28, loss = 0.00189438 Iteration 29, loss = 0.00181177 Iteration 30, loss = 0.00184639 Iteration 31, loss = 0.00169820 Iteration 32, loss = 0.00166496 Iteration 33, loss = 0.00192917 Iteration 34, loss = 0.00197153 Iteration 35, loss = 0.00151555 Iteration 36, loss = 0.00140021 Iteration 37, loss = 0.00136904 Iteration 38, loss = 0.00141569 Iteration 39, loss = 0.00135361 Iteration 40, loss = 0.00136903 Iteration 41, loss = 0.00124089 Iteration 42, loss = 0.00123305 Iteration 43, loss = 0.00114230 Iteration 44, loss = 0.00128475 Iteration 45, loss = 0.00114473 Iteration 46, loss = 0.00120337 Iteration 47, loss = 0.00105184 Iteration 48, loss = 0.00107924 Iteration 49, loss = 0.00097687 Iteration 50, loss = 0.00109109 Iteration 51, loss = 0.00117004 Iteration 52, loss = 0.00116962 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.27003713 Iteration 2, loss = 0.04172470 Iteration 3, loss = 0.01873595 Iteration 4, loss = 0.01212604 Iteration 5, loss = 0.00902883 Iteration 6, loss = 0.00739931 Iteration 7, loss = 0.00642704 Iteration 8, loss = 0.00552507 Iteration 9, loss = 0.00496785 Iteration 10, loss = 0.00447793 Iteration 11, loss = 0.00408543 Iteration 12, loss = 0.00366187 Iteration 13, loss = 0.00347912 Iteration 14, loss = 0.00322857 Iteration 15, loss = 0.00303120 Iteration 16, loss = 0.00288073 Iteration 17, loss = 0.00264583 Iteration 18, loss = 0.00258542 Iteration 19, loss = 0.00238178 Iteration 20, loss = 0.00227061 Iteration 21, loss = 0.00210884 Iteration 22, loss = 0.00201955 Iteration 23, loss = 0.00193548 Iteration 24, loss = 0.00192865 Iteration 25, loss = 0.00182062 Iteration 26, loss = 0.00167916 Iteration 27, loss = 0.00162408 Iteration 28, loss = 0.00159935 Iteration 29, loss = 0.00150069 Iteration 30, loss = 0.00159159 Iteration 31, loss = 0.00141056 Iteration 32, loss = 0.00137010 Iteration 33, loss = 0.00131787 Iteration 34, loss = 0.00132576 Iteration 35, loss = 0.00124340 Iteration 36, loss = 0.00120173 Iteration 37, loss = 0.00123124 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.26793467 Iteration 2, loss = 0.04252859 Iteration 3, loss = 0.01895820 Iteration 4, loss = 0.01199710 Iteration 5, loss = 0.00892235 Iteration 6, loss = 0.00730964 Iteration 7, loss = 0.00615262 Iteration 8, loss = 0.00545214 Iteration 9, loss = 0.00492448 Iteration 10, loss = 0.00442075 Iteration 11, loss = 0.00402896 Iteration 12, loss = 0.00385822 Iteration 13, loss = 0.00352025 Iteration 14, loss = 0.00325974 Iteration 15, loss = 0.00308738 Iteration 16, loss = 0.00285108 Iteration 17, loss = 0.00271672 Iteration 18, loss = 0.00252495 Iteration 19, loss = 0.00253992 Iteration 20, loss = 0.00230991 Iteration 21, loss = 0.00222296 Iteration 22, loss = 0.00218390 Iteration 23, loss = 0.00197106 Iteration 24, loss = 0.00194466 Iteration 25, loss = 0.00184177 Iteration 26, loss = 0.00179625 Iteration 27, loss = 0.00177911 Iteration 28, loss = 0.00179810 Iteration 29, loss = 0.00164754 Iteration 30, loss = 0.00152582 Iteration 31, loss = 0.00151229 Iteration 32, loss = 0.00148500 Iteration 33, loss = 0.00140314 Iteration 34, loss = 0.00147240 Iteration 35, loss = 0.00125510 Iteration 36, loss = 0.00134387 Iteration 37, loss = 0.00138348 Iteration 38, loss = 0.00117023 Iteration 39, loss = 0.00117776 Iteration 40, loss = 0.00125334 Iteration 41, loss = 0.00107424 Iteration 42, loss = 0.00106724 Iteration 43, loss = 0.00106291 Iteration 44, loss = 0.00107944 Iteration 45, loss = 0.00097179 Iteration 46, loss = 0.00097799 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.30801847 Iteration 2, loss = 0.04648982 Iteration 3, loss = 0.02053536 Iteration 4, loss = 0.01298015 Iteration 5, loss = 0.00976217 Iteration 6, loss = 0.00767773 Iteration 7, loss = 0.00652609 Iteration 8, loss = 0.00566382 Iteration 9, loss = 0.00506429 Iteration 10, loss = 0.00456241 Iteration 11, loss = 0.00421584 Iteration 12, loss = 0.00383608 Iteration 13, loss = 0.00359980 Iteration 14, loss = 0.00337577 Iteration 15, loss = 0.00315300 Iteration 16, loss = 0.00292449 Iteration 17, loss = 0.00285124 Iteration 18, loss = 0.00272715 Iteration 19, loss = 0.00251327 Iteration 20, loss = 0.00246406 Iteration 21, loss = 0.00222237 Iteration 22, loss = 0.00213935 Iteration 23, loss = 0.00209982 Iteration 24, loss = 0.00208820 Iteration 25, loss = 0.00200175 Iteration 26, loss = 0.00180473 Iteration 27, loss = 0.00177407 Iteration 28, loss = 0.00167976 Iteration 29, loss = 0.00162741 Iteration 30, loss = 0.00157720 Iteration 31, loss = 0.00152651 Iteration 32, loss = 0.00145656 Iteration 33, loss = 0.00138516 Iteration 34, loss = 0.00131234 Iteration 35, loss = 0.00135782 Iteration 36, loss = 0.00127366 Iteration 37, loss = 0.00122484 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.26318764 Iteration 2, loss = 0.04565139 Iteration 3, loss = 0.02082034 Iteration 4, loss = 0.01329638 Iteration 5, loss = 0.00987792 Iteration 6, loss = 0.00800751 Iteration 7, loss = 0.00698534 Iteration 8, loss = 0.00600235 Iteration 9, loss = 0.00530929 Iteration 10, loss = 0.00475785 Iteration 11, loss = 0.00441084 Iteration 12, loss = 0.00402553 Iteration 13, loss = 0.00379892 Iteration 14, loss = 0.00346943 Iteration 15, loss = 0.00334871 Iteration 16, loss = 0.00307811 Iteration 17, loss = 0.00291485 Iteration 18, loss = 0.00275913 Iteration 19, loss = 0.00254160 Iteration 20, loss = 0.00252892 Iteration 21, loss = 0.00236437 Iteration 22, loss = 0.00228884 Iteration 23, loss = 0.00214070 Iteration 24, loss = 0.00206783 Iteration 25, loss = 0.00199006 Iteration 26, loss = 0.00186824 Iteration 27, loss = 0.00174368 Iteration 28, loss = 0.00177742 Iteration 29, loss = 0.00166319 Iteration 30, loss = 0.00160701 Iteration 31, loss = 0.00161733 Iteration 32, loss = 0.00153615 Iteration 33, loss = 0.00141009 Iteration 34, loss = 0.00136145 Iteration 35, loss = 0.00140978 Iteration 36, loss = 0.00133564 Iteration 37, loss = 0.00123676 Iteration 38, loss = 0.00118835 Iteration 39, loss = 0.00115035 Iteration 40, loss = 0.00116956 Iteration 41, loss = 0.00114027 Iteration 42, loss = 0.00123286 Iteration 43, loss = 0.00103929 Iteration 44, loss = 0.00105374 Iteration 45, loss = 0.00102292 Iteration 46, loss = 0.00106655 Iteration 47, loss = 0.00091408 Iteration 48, loss = 0.00129273 Iteration 49, loss = 0.00090651 Iteration 50, loss = 0.00082599 Iteration 51, loss = 0.00084601 Iteration 52, loss = 0.00079721 Iteration 53, loss = 0.00078234 Iteration 54, loss = 0.00087993 Iteration 55, loss = 0.00079757 Iteration 56, loss = 0.00073167 Iteration 57, loss = 0.00073116 Iteration 58, loss = 0.00071127 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.12127820 Iteration 2, loss = 0.02072843 Iteration 3, loss = 0.01110797 Iteration 4, loss = 0.00782587 Iteration 5, loss = 0.00632322 Iteration 6, loss = 0.00525796 Iteration 7, loss = 0.00452405 Iteration 8, loss = 0.00419473 Iteration 9, loss = 0.00353666 Iteration 10, loss = 0.00327247 Iteration 11, loss = 0.00294771 Iteration 12, loss = 0.00276566 Iteration 13, loss = 0.00245030 Iteration 14, loss = 0.00232220 Iteration 15, loss = 0.00222795 Iteration 16, loss = 0.00201928 Iteration 17, loss = 0.00200911 Iteration 18, loss = 0.00194721 Iteration 19, loss = 0.00166144 Iteration 20, loss = 0.00177600 Iteration 21, loss = 0.00164092 Iteration 22, loss = 0.00155555 Iteration 23, loss = 0.00145862 Iteration 24, loss = 0.00140466 Iteration 25, loss = 0.00138341 Iteration 26, loss = 0.00136885 Iteration 27, loss = 0.00129783 Iteration 28, loss = 0.00122937 Iteration 29, loss = 0.00127309 Iteration 30, loss = 0.00113261 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.21426995 Iteration 2, loss = 0.03249583 Iteration 3, loss = 0.01537290 Iteration 4, loss = 0.01024058 Iteration 5, loss = 0.00786101 Iteration 6, loss = 0.00643570 Iteration 7, loss = 0.00556831 Iteration 8, loss = 0.00489286 Iteration 9, loss = 0.00428525 Iteration 10, loss = 0.00401458 Iteration 11, loss = 0.00364979 Iteration 12, loss = 0.00338262 Iteration 13, loss = 0.00304160 Iteration 14, loss = 0.00293898 Iteration 15, loss = 0.00271928 Iteration 16, loss = 0.00249915 Iteration 17, loss = 0.00236647 Iteration 18, loss = 0.00227870 Iteration 19, loss = 0.00208148 Iteration 20, loss = 0.00198467 Iteration 21, loss = 0.00194444 Iteration 22, loss = 0.00181032 Iteration 23, loss = 0.00175757 Iteration 24, loss = 0.00169512 Iteration 25, loss = 0.00173471 Iteration 26, loss = 0.00152753 Iteration 27, loss = 0.00148670 Iteration 28, loss = 0.00145472 Iteration 29, loss = 0.00138784 Iteration 30, loss = 0.00133598 Iteration 31, loss = 0.00134638 Iteration 32, loss = 0.00127523 Iteration 33, loss = 0.00120041 Iteration 34, loss = 0.00124321 Iteration 35, loss = 0.00122755 Iteration 36, loss = 0.00115259 Iteration 37, loss = 0.00108383 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.23674637 Iteration 2, loss = 0.03595083 Iteration 3, loss = 0.01683527 Iteration 4, loss = 0.01107406 Iteration 5, loss = 0.00844146 Iteration 6, loss = 0.00688865 Iteration 7, loss = 0.00602009 Iteration 8, loss = 0.00524371 Iteration 9, loss = 0.00483265 Iteration 10, loss = 0.00439879 Iteration 11, loss = 0.00406095 Iteration 12, loss = 0.00391505 Iteration 13, loss = 0.00347216 Iteration 14, loss = 0.00324601 Iteration 15, loss = 0.00307566 Iteration 16, loss = 0.00287395 Iteration 17, loss = 0.00274681 Iteration 18, loss = 0.00255677 Iteration 19, loss = 0.00257505 Iteration 20, loss = 0.00246785 Iteration 21, loss = 0.00215445 Iteration 22, loss = 0.00215259 Iteration 23, loss = 0.00224179 Iteration 24, loss = 0.00196688 Iteration 25, loss = 0.00187059 Iteration 26, loss = 0.00189581 Iteration 27, loss = 0.00179532 Iteration 28, loss = 0.00170485 Iteration 29, loss = 0.00162195 Iteration 30, loss = 0.00161816 Iteration 31, loss = 0.00160414 Iteration 32, loss = 0.00142311 Iteration 33, loss = 0.00140204 Iteration 34, loss = 0.00139702 Iteration 35, loss = 0.00135571 Iteration 36, loss = 0.00132546 Iteration 37, loss = 0.00129803 Iteration 38, loss = 0.00122721 Iteration 39, loss = 0.00141614 Iteration 40, loss = 0.00111214 Iteration 41, loss = 0.00106847 Iteration 42, loss = 0.00106131 Iteration 43, loss = 0.00097222 Iteration 44, loss = 0.00094276 Iteration 45, loss = 0.00094898 Iteration 46, loss = 0.00094780 Iteration 47, loss = 0.00099432 Iteration 48, loss = 0.00086875 Iteration 49, loss = 0.00085344 Iteration 50, loss = 0.00081591 Iteration 51, loss = 0.00083785 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.21697995 Iteration 2, loss = 0.03282354 Iteration 3, loss = 0.01454773 Iteration 4, loss = 0.00945790 Iteration 5, loss = 0.00699274 Iteration 6, loss = 0.00570672 Iteration 7, loss = 0.00486882 Iteration 8, loss = 0.00430656 Iteration 9, loss = 0.00374131 Iteration 10, loss = 0.00357056 Iteration 11, loss = 0.00323171 Iteration 12, loss = 0.00300481 Iteration 13, loss = 0.00276588 Iteration 14, loss = 0.00267175 Iteration 15, loss = 0.00249326 Iteration 16, loss = 0.00236580 Iteration 17, loss = 0.00221225 Iteration 18, loss = 0.00212606 Iteration 19, loss = 0.00202252 Iteration 20, loss = 0.00183773 Iteration 21, loss = 0.00185582 Iteration 22, loss = 0.00178325 Iteration 23, loss = 0.00169095 Iteration 24, loss = 0.00160280 Iteration 25, loss = 0.00160160 Iteration 26, loss = 0.00145003 Iteration 27, loss = 0.00137542 Iteration 28, loss = 0.00138233 Iteration 29, loss = 0.00133555 Iteration 30, loss = 0.00122863 Iteration 31, loss = 0.00129710 Iteration 32, loss = 0.00122546 Iteration 33, loss = 0.00108652 Iteration 34, loss = 0.00108110 Iteration 35, loss = 0.00105709 Iteration 36, loss = 0.00096216 Iteration 37, loss = 0.00095571 Iteration 38, loss = 0.00099885 Iteration 39, loss = 0.00087529 Iteration 40, loss = 0.00090823 Iteration 41, loss = 0.00085784 Iteration 42, loss = 0.00087062 Iteration 43, loss = 0.00081232 Iteration 44, loss = 0.00077154 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.19632468 Iteration 2, loss = 0.03055549 Iteration 3, loss = 0.01461438 Iteration 4, loss = 0.00968938 Iteration 5, loss = 0.00736134 Iteration 6, loss = 0.00600776 Iteration 7, loss = 0.00515646 Iteration 8, loss = 0.00452884 Iteration 9, loss = 0.00403669 Iteration 10, loss = 0.00370045 Iteration 11, loss = 0.00330879 Iteration 12, loss = 0.00307276 Iteration 13, loss = 0.00290638 Iteration 14, loss = 0.00265674 Iteration 15, loss = 0.00262239 Iteration 16, loss = 0.00245566 Iteration 17, loss = 0.00219585 Iteration 18, loss = 0.00214130 Iteration 19, loss = 0.00203458 Iteration 20, loss = 0.00193490 Iteration 21, loss = 0.00195181 Iteration 22, loss = 0.00180976 Iteration 23, loss = 0.00170567 Iteration 24, loss = 0.00163634 Iteration 25, loss = 0.00154265 Iteration 26, loss = 0.00161950 Iteration 27, loss = 0.00146768 Iteration 28, loss = 0.00140586 Iteration 29, loss = 0.00134007 Iteration 30, loss = 0.00134418 Iteration 31, loss = 0.00120919 Iteration 32, loss = 0.00119716 Iteration 33, loss = 0.00122115 Iteration 34, loss = 0.00115435 Iteration 35, loss = 0.00114143 Iteration 36, loss = 0.00103415 Iteration 37, loss = 0.00111776 Iteration 38, loss = 0.00101849 Iteration 39, loss = 0.00102315 Iteration 40, loss = 0.00094604 Iteration 41, loss = 0.00089924 Iteration 42, loss = 0.00122708 Iteration 43, loss = 0.00083898 Iteration 44, loss = 0.00080850 Iteration 45, loss = 0.00079565 Iteration 46, loss = 0.00076268 Iteration 47, loss = 0.00077751 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.30085656 Iteration 2, loss = 0.04713429 Iteration 3, loss = 0.02010077 Iteration 4, loss = 0.01263982 Iteration 5, loss = 0.00933644 Iteration 6, loss = 0.00753929 Iteration 7, loss = 0.00632073 Iteration 8, loss = 0.00559298 Iteration 9, loss = 0.00495829 Iteration 10, loss = 0.00449833 Iteration 11, loss = 0.00403699 Iteration 12, loss = 0.00375821 Iteration 13, loss = 0.00348822 Iteration 14, loss = 0.00319655 Iteration 15, loss = 0.00298528 Iteration 16, loss = 0.00285051 Iteration 17, loss = 0.00263573 Iteration 18, loss = 0.00248695 Iteration 19, loss = 0.00238588 Iteration 20, loss = 0.00229111 Iteration 21, loss = 0.00221657 Iteration 22, loss = 0.00201589 Iteration 23, loss = 0.00204073 Iteration 24, loss = 0.00191829 Iteration 25, loss = 0.00189575 Iteration 26, loss = 0.00180331 Iteration 27, loss = 0.00170189 Iteration 28, loss = 0.00165547 Iteration 29, loss = 0.00158880 Iteration 30, loss = 0.00150944 Iteration 31, loss = 0.00145110 Iteration 32, loss = 0.00151186 Iteration 33, loss = 0.00137867 Iteration 34, loss = 0.00135031 Iteration 35, loss = 0.00133132 Iteration 36, loss = 0.00129561 Iteration 37, loss = 0.00141166 Iteration 38, loss = 0.00122200 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.15998108 Iteration 2, loss = 0.02616840 Iteration 3, loss = 0.01296120 Iteration 4, loss = 0.00877303 Iteration 5, loss = 0.00674221 Iteration 6, loss = 0.00553456 Iteration 7, loss = 0.00472848 Iteration 8, loss = 0.00418712 Iteration 9, loss = 0.00377503 Iteration 10, loss = 0.00363043 Iteration 11, loss = 0.00309220 Iteration 12, loss = 0.00280014 Iteration 13, loss = 0.00263797 Iteration 14, loss = 0.00242306 Iteration 15, loss = 0.00240715 Iteration 16, loss = 0.00220648 Iteration 17, loss = 0.00209705 Iteration 18, loss = 0.00194061 Iteration 19, loss = 0.00192418 Iteration 20, loss = 0.00180766 Iteration 21, loss = 0.00178009 Iteration 22, loss = 0.00173281 Iteration 23, loss = 0.00160904 Iteration 24, loss = 0.00156190 Iteration 25, loss = 0.00152545 Iteration 26, loss = 0.00141819 Iteration 27, loss = 0.00132449 Iteration 28, loss = 0.00142976 Iteration 29, loss = 0.00122419 Iteration 30, loss = 0.00123142 Iteration 31, loss = 0.00124170 Iteration 32, loss = 0.00119148 Iteration 33, loss = 0.00111318 Iteration 34, loss = 0.00101748 Iteration 35, loss = 0.00101322 Iteration 36, loss = 0.00098681 Iteration 37, loss = 0.00102486 Iteration 38, loss = 0.00116560 Iteration 39, loss = 0.00089403 Iteration 40, loss = 0.00092569 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.24356417 Iteration 2, loss = 0.03747884 Iteration 3, loss = 0.01766401 Iteration 4, loss = 0.01172079 Iteration 5, loss = 0.00887052 Iteration 6, loss = 0.00727713 Iteration 7, loss = 0.00643801 Iteration 8, loss = 0.00553316 Iteration 9, loss = 0.00489933 Iteration 10, loss = 0.00432386 Iteration 11, loss = 0.00403426 Iteration 12, loss = 0.00371450 Iteration 13, loss = 0.00341258 Iteration 14, loss = 0.00314888 Iteration 15, loss = 0.00308616 Iteration 16, loss = 0.00281451 Iteration 17, loss = 0.00267077 Iteration 18, loss = 0.00256358 Iteration 19, loss = 0.00244654 Iteration 20, loss = 0.00231989 Iteration 21, loss = 0.00225552 Iteration 22, loss = 0.00208416 Iteration 23, loss = 0.00205707 Iteration 24, loss = 0.00207925 Iteration 25, loss = 0.00194366 Iteration 26, loss = 0.00186574 Iteration 27, loss = 0.00176877 Iteration 28, loss = 0.00177459 Iteration 29, loss = 0.00159392 Iteration 30, loss = 0.00158229 Iteration 31, loss = 0.00171097 Iteration 32, loss = 0.00160694 Iteration 33, loss = 0.00141822 Iteration 34, loss = 0.00148679 Iteration 35, loss = 0.00132993 Iteration 36, loss = 0.00137151 Iteration 37, loss = 0.00130748 Iteration 38, loss = 0.00135357 Iteration 39, loss = 0.00118296 Iteration 40, loss = 0.00119074 Iteration 41, loss = 0.00111142 Iteration 42, loss = 0.00107732 Iteration 43, loss = 0.00101960 Iteration 44, loss = 0.00105419 Iteration 45, loss = 0.00114559 Iteration 46, loss = 0.00144276 Iteration 47, loss = 0.00102358 Iteration 48, loss = 0.00092292 Iteration 49, loss = 0.00088261 Iteration 50, loss = 0.00086711 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.14103731 Iteration 2, loss = 0.02395332 Iteration 3, loss = 0.01247577 Iteration 4, loss = 0.00869978 Iteration 5, loss = 0.00689858 Iteration 6, loss = 0.00588972 Iteration 7, loss = 0.00501119 Iteration 8, loss = 0.00447889 Iteration 9, loss = 0.00400156 Iteration 10, loss = 0.00365607 Iteration 11, loss = 0.00331431 Iteration 12, loss = 0.00309497 Iteration 13, loss = 0.00288493 Iteration 14, loss = 0.00260630 Iteration 15, loss = 0.00247660 Iteration 16, loss = 0.00229509 Iteration 17, loss = 0.00218255 Iteration 18, loss = 0.00212866 Iteration 19, loss = 0.00193324 Iteration 20, loss = 0.00190854 Iteration 21, loss = 0.00177168 Iteration 22, loss = 0.00171170 Iteration 23, loss = 0.00167578 Iteration 24, loss = 0.00160131 Iteration 25, loss = 0.00153907 Iteration 26, loss = 0.00149369 Iteration 27, loss = 0.00141580 Iteration 28, loss = 0.00129478 Iteration 29, loss = 0.00125578 Iteration 30, loss = 0.00125152 Iteration 31, loss = 0.00118953 Iteration 32, loss = 0.00114460 Iteration 33, loss = 0.00108857 Iteration 34, loss = 0.00112899 Iteration 35, loss = 0.00100057 Iteration 36, loss = 0.00095899 Iteration 37, loss = 0.00099890 Iteration 38, loss = 0.00093966 Iteration 39, loss = 0.00094404 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.19175674 Iteration 2, loss = 0.02865194 Iteration 3, loss = 0.01378161 Iteration 4, loss = 0.00916791 Iteration 5, loss = 0.00715318 Iteration 6, loss = 0.00583771 Iteration 7, loss = 0.00510458 Iteration 8, loss = 0.00451028 Iteration 9, loss = 0.00401469 Iteration 10, loss = 0.00374251 Iteration 11, loss = 0.00339787 Iteration 12, loss = 0.00312493 Iteration 13, loss = 0.00291561 Iteration 14, loss = 0.00280298 Iteration 15, loss = 0.00255680 Iteration 16, loss = 0.00250622 Iteration 17, loss = 0.00231202 Iteration 18, loss = 0.00218316 Iteration 19, loss = 0.00204868 Iteration 20, loss = 0.00192405 Iteration 21, loss = 0.00189972 Iteration 22, loss = 0.00185437 Iteration 23, loss = 0.00172171 Iteration 24, loss = 0.00173079 Iteration 25, loss = 0.00155917 Iteration 26, loss = 0.00170349 Iteration 27, loss = 0.00147824 Iteration 28, loss = 0.00138160 Iteration 29, loss = 0.00129939 Iteration 30, loss = 0.00149983 Iteration 31, loss = 0.00121377 Iteration 32, loss = 0.00123131 Iteration 33, loss = 0.00122028 Iteration 34, loss = 0.00113509 Iteration 35, loss = 0.00108174 Iteration 36, loss = 0.00117619 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.23329504 Iteration 2, loss = 0.03798636 Iteration 3, loss = 0.01680087 Iteration 4, loss = 0.01061781 Iteration 5, loss = 0.00780230 Iteration 6, loss = 0.00635184 Iteration 7, loss = 0.00532995 Iteration 8, loss = 0.00451070 Iteration 9, loss = 0.00384001 Iteration 10, loss = 0.00340476 Iteration 11, loss = 0.00303577 Iteration 12, loss = 0.00270462 Iteration 13, loss = 0.00242565 Iteration 14, loss = 0.00238400 Iteration 15, loss = 0.00201250 Iteration 16, loss = 0.00186556 Iteration 17, loss = 0.00169566 Iteration 18, loss = 0.00160141 Iteration 19, loss = 0.00150028 Iteration 20, loss = 0.00142534 Iteration 21, loss = 0.00142757 Iteration 22, loss = 0.00127965 Iteration 23, loss = 0.00119484 Iteration 24, loss = 0.00122185 Iteration 25, loss = 0.00119941 Iteration 26, loss = 0.00101283 Iteration 27, loss = 0.00095637 Iteration 28, loss = 0.00093484 Iteration 29, loss = 0.00094044 Iteration 30, loss = 0.00085044 Iteration 31, loss = 0.00081333 Iteration 32, loss = 0.00079287 Iteration 33, loss = 0.00077859 Iteration 34, loss = 0.00073054 Iteration 35, loss = 0.00074000 Iteration 36, loss = 0.00071108 Iteration 37, loss = 0.00070894 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.18918977 Iteration 2, loss = 0.03102997 Iteration 3, loss = 0.01497729 Iteration 4, loss = 0.01003740 Iteration 5, loss = 0.00774090 Iteration 6, loss = 0.00640155 Iteration 7, loss = 0.00541384 Iteration 8, loss = 0.00491127 Iteration 9, loss = 0.00418452 Iteration 10, loss = 0.00389708 Iteration 11, loss = 0.00352965 Iteration 12, loss = 0.00333048 Iteration 13, loss = 0.00299706 Iteration 14, loss = 0.00297403 Iteration 15, loss = 0.00257965 Iteration 16, loss = 0.00245748 Iteration 17, loss = 0.00243220 Iteration 18, loss = 0.00227636 Iteration 19, loss = 0.00219768 Iteration 20, loss = 0.00212095 Iteration 21, loss = 0.00191819 Iteration 22, loss = 0.00187342 Iteration 23, loss = 0.00171173 Iteration 24, loss = 0.00177891 Iteration 25, loss = 0.00163474 Iteration 26, loss = 0.00173534 Iteration 27, loss = 0.00199210 Iteration 28, loss = 0.00151793 Iteration 29, loss = 0.00141216 Iteration 30, loss = 0.00144781 Iteration 31, loss = 0.00134483 Iteration 32, loss = 0.00136602 Iteration 33, loss = 0.00129405 Iteration 34, loss = 0.00119096 Iteration 35, loss = 0.00120733 Iteration 36, loss = 0.00109235 Iteration 37, loss = 0.00105349 Iteration 38, loss = 0.00106243 Iteration 39, loss = 0.00120224 Iteration 40, loss = 0.00108339 Iteration 41, loss = 0.00097002 Iteration 42, loss = 0.00088866 Iteration 43, loss = 0.00092723 Iteration 44, loss = 0.00093119 Iteration 45, loss = 0.00096888 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.12865807 Iteration 2, loss = 0.02168545 Iteration 3, loss = 0.01135989 Iteration 4, loss = 0.00825866 Iteration 5, loss = 0.00638067 Iteration 6, loss = 0.00540334 Iteration 7, loss = 0.00471462 Iteration 8, loss = 0.00416616 Iteration 9, loss = 0.00395420 Iteration 10, loss = 0.00343526 Iteration 11, loss = 0.00327321 Iteration 12, loss = 0.00280607 Iteration 13, loss = 0.00265751 Iteration 14, loss = 0.00248752 Iteration 15, loss = 0.00231808 Iteration 16, loss = 0.00220414 Iteration 17, loss = 0.00203399 Iteration 18, loss = 0.00199609 Iteration 19, loss = 0.00189430 Iteration 20, loss = 0.00176966 Iteration 21, loss = 0.00169492 Iteration 22, loss = 0.00161191 Iteration 23, loss = 0.00182464 Iteration 24, loss = 0.00150585 Iteration 25, loss = 0.00138672 Iteration 26, loss = 0.00139368 Iteration 27, loss = 0.00132726 Iteration 28, loss = 0.00126760 Iteration 29, loss = 0.00122218 Iteration 30, loss = 0.00115463 Iteration 31, loss = 0.00113919 Iteration 32, loss = 0.00112982 Iteration 33, loss = 0.00105058 Iteration 34, loss = 0.00100691 Iteration 35, loss = 0.00099298 Iteration 36, loss = 0.00096780 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.13846763 Iteration 2, loss = 0.02341315 Iteration 3, loss = 0.01181279 Iteration 4, loss = 0.00815587 Iteration 5, loss = 0.00643200 Iteration 6, loss = 0.00539649 Iteration 7, loss = 0.00464041 Iteration 8, loss = 0.00403818 Iteration 9, loss = 0.00368973 Iteration 10, loss = 0.00325089 Iteration 11, loss = 0.00298917 Iteration 12, loss = 0.00284234 Iteration 13, loss = 0.00248177 Iteration 14, loss = 0.00234967 Iteration 15, loss = 0.00217531 Iteration 16, loss = 0.00204936 Iteration 17, loss = 0.00194241 Iteration 18, loss = 0.00182513 Iteration 19, loss = 0.00176250 Iteration 20, loss = 0.00170742 Iteration 21, loss = 0.00160831 Iteration 22, loss = 0.00156466 Iteration 23, loss = 0.00146196 Iteration 24, loss = 0.00142125 Iteration 25, loss = 0.00138437 Iteration 26, loss = 0.00130152 Iteration 27, loss = 0.00125306 Iteration 28, loss = 0.00119931 Iteration 29, loss = 0.00115714 Iteration 30, loss = 0.00112611 Iteration 31, loss = 0.00108913 Iteration 32, loss = 0.00106121 Iteration 33, loss = 0.00101031 Iteration 34, loss = 0.00097932 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.26487711 Iteration 2, loss = 0.03883244 Iteration 3, loss = 0.01809672 Iteration 4, loss = 0.01189380 Iteration 5, loss = 0.00897229 Iteration 6, loss = 0.00750512 Iteration 7, loss = 0.00623250 Iteration 8, loss = 0.00560729 Iteration 9, loss = 0.00496513 Iteration 10, loss = 0.00445119 Iteration 11, loss = 0.00408760 Iteration 12, loss = 0.00379925 Iteration 13, loss = 0.00354430 Iteration 14, loss = 0.00331183 Iteration 15, loss = 0.00311601 Iteration 16, loss = 0.00285990 Iteration 17, loss = 0.00277798 Iteration 18, loss = 0.00253427 Iteration 19, loss = 0.00246299 Iteration 20, loss = 0.00232827 Iteration 21, loss = 0.00217335 Iteration 22, loss = 0.00212756 Iteration 23, loss = 0.00232519 Iteration 24, loss = 0.00190012 Iteration 25, loss = 0.00184601 Iteration 26, loss = 0.00181054 Iteration 27, loss = 0.00172012 Iteration 28, loss = 0.00176440 Iteration 29, loss = 0.00158095 Iteration 30, loss = 0.00152995 Iteration 31, loss = 0.00153866 Iteration 32, loss = 0.00148443 Iteration 33, loss = 0.00143546 Iteration 34, loss = 0.00137118 Iteration 35, loss = 0.00132542 Iteration 36, loss = 0.00140395 Iteration 37, loss = 0.00122338 Iteration 38, loss = 0.00114696 Iteration 39, loss = 0.00116395 Iteration 40, loss = 0.00118782 Iteration 41, loss = 0.00107011 Iteration 42, loss = 0.00106368 Iteration 43, loss = 0.00110295 Iteration 44, loss = 0.00097370 Iteration 45, loss = 0.00093815 Iteration 46, loss = 0.00091670 Iteration 47, loss = 0.00092217 Iteration 48, loss = 0.00088858 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.32832637 Iteration 2, loss = 0.04990203 Iteration 3, loss = 0.02130518 Iteration 4, loss = 0.01322164 Iteration 5, loss = 0.00962155 Iteration 6, loss = 0.00766395 Iteration 7, loss = 0.00642653 Iteration 8, loss = 0.00568609 Iteration 9, loss = 0.00513807 Iteration 10, loss = 0.00455298 Iteration 11, loss = 0.00412048 Iteration 12, loss = 0.00379574 Iteration 13, loss = 0.00350134 Iteration 14, loss = 0.00333578 Iteration 15, loss = 0.00310523 Iteration 16, loss = 0.00286603 Iteration 17, loss = 0.00283738 Iteration 18, loss = 0.00263254 Iteration 19, loss = 0.00248500 Iteration 20, loss = 0.00236727 Iteration 21, loss = 0.00225253 Iteration 22, loss = 0.00216436 Iteration 23, loss = 0.00201300 Iteration 24, loss = 0.00197739 Iteration 25, loss = 0.00186880 Iteration 26, loss = 0.00176404 Iteration 27, loss = 0.00175892 Iteration 28, loss = 0.00168960 Iteration 29, loss = 0.00164200 Iteration 30, loss = 0.00152889 Iteration 31, loss = 0.00146773 Iteration 32, loss = 0.00152376 Iteration 33, loss = 0.00140282 Iteration 34, loss = 0.00133054 Iteration 35, loss = 0.00142523 Iteration 36, loss = 0.00126501 Iteration 37, loss = 0.00123765 Iteration 38, loss = 0.00119865 Iteration 39, loss = 0.00117392 Iteration 40, loss = 0.00112388 Iteration 41, loss = 0.00110156 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.16392143 Iteration 2, loss = 0.02812225 Iteration 3, loss = 0.01385084 Iteration 4, loss = 0.00946995 Iteration 5, loss = 0.00732672 Iteration 6, loss = 0.00613011 Iteration 7, loss = 0.00524277 Iteration 8, loss = 0.00468537 Iteration 9, loss = 0.00417074 Iteration 10, loss = 0.00385371 Iteration 11, loss = 0.00360352 Iteration 12, loss = 0.00322462 Iteration 13, loss = 0.00314279 Iteration 14, loss = 0.00288565 Iteration 15, loss = 0.00268751 Iteration 16, loss = 0.00258963 Iteration 17, loss = 0.00230424 Iteration 18, loss = 0.00223405 Iteration 19, loss = 0.00211776 Iteration 20, loss = 0.00211444 Iteration 21, loss = 0.00189655 Iteration 22, loss = 0.00194142 Iteration 23, loss = 0.00183343 Iteration 24, loss = 0.00172694 Iteration 25, loss = 0.00156707 Iteration 26, loss = 0.00166206 Iteration 27, loss = 0.00146595 Iteration 28, loss = 0.00153544 Iteration 29, loss = 0.00151462 Iteration 30, loss = 0.00137741 Iteration 31, loss = 0.00136676 Iteration 32, loss = 0.00123846 Iteration 33, loss = 0.00121058 Iteration 34, loss = 0.00112420 Iteration 35, loss = 0.00117945 Iteration 36, loss = 0.00116221 Iteration 37, loss = 0.00117840 Iteration 38, loss = 0.00101780 Iteration 39, loss = 0.00098433 Iteration 40, loss = 0.00092961 Iteration 41, loss = 0.00097460 Iteration 42, loss = 0.00087592 Iteration 43, loss = 0.00099164 Iteration 44, loss = 0.00084789 Iteration 45, loss = 0.00084070 Iteration 46, loss = 0.00082384 Iteration 47, loss = 0.00080228 Iteration 48, loss = 0.00076295 Iteration 49, loss = 0.00076491 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.15625793 Iteration 2, loss = 0.02620699 Iteration 3, loss = 0.01350982 Iteration 4, loss = 0.00946152 Iteration 5, loss = 0.00768620 Iteration 6, loss = 0.00612888 Iteration 7, loss = 0.00544973 Iteration 8, loss = 0.00486744 Iteration 9, loss = 0.00426506 Iteration 10, loss = 0.00385101 Iteration 11, loss = 0.00349068 Iteration 12, loss = 0.00338114 Iteration 13, loss = 0.00305291 Iteration 14, loss = 0.00289555 Iteration 15, loss = 0.00272436 Iteration 16, loss = 0.00258980 Iteration 17, loss = 0.00237836 Iteration 18, loss = 0.00230505 Iteration 19, loss = 0.00211442 Iteration 20, loss = 0.00202836 Iteration 21, loss = 0.00193651 Iteration 22, loss = 0.00191141 Iteration 23, loss = 0.00179350 Iteration 24, loss = 0.00168177 Iteration 25, loss = 0.00159392 Iteration 26, loss = 0.00150176 Iteration 27, loss = 0.00149504 Iteration 28, loss = 0.00150319 Iteration 29, loss = 0.00140903 Iteration 30, loss = 0.00136507 Iteration 31, loss = 0.00131870 Iteration 32, loss = 0.00122730 Iteration 33, loss = 0.00122868 Iteration 34, loss = 0.00119660 Iteration 35, loss = 0.00114138 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.40010920 Iteration 2, loss = 0.05949071 Iteration 3, loss = 0.02555562 Iteration 4, loss = 0.01558038 Iteration 5, loss = 0.01125076 Iteration 6, loss = 0.00903989 Iteration 7, loss = 0.00747263 Iteration 8, loss = 0.00649183 Iteration 9, loss = 0.00576978 Iteration 10, loss = 0.00502875 Iteration 11, loss = 0.00464091 Iteration 12, loss = 0.00431285 Iteration 13, loss = 0.00395074 Iteration 14, loss = 0.00376673 Iteration 15, loss = 0.00338699 Iteration 16, loss = 0.00325987 Iteration 17, loss = 0.00304507 Iteration 18, loss = 0.00285313 Iteration 19, loss = 0.00277540 Iteration 20, loss = 0.00252216 Iteration 21, loss = 0.00246147 Iteration 22, loss = 0.00239798 Iteration 23, loss = 0.00224678 Iteration 24, loss = 0.00212675 Iteration 25, loss = 0.00217700 Iteration 26, loss = 0.00194314 Iteration 27, loss = 0.00201055 Iteration 28, loss = 0.00191225 Iteration 29, loss = 0.00180925 Iteration 30, loss = 0.00182315 Iteration 31, loss = 0.00158053 Iteration 32, loss = 0.00167912 Iteration 33, loss = 0.00151979 Iteration 34, loss = 0.00160744 Iteration 35, loss = 0.00139990 Iteration 36, loss = 0.00146313 Iteration 37, loss = 0.00133644 Iteration 38, loss = 0.00141338 Iteration 39, loss = 0.00147485 Iteration 40, loss = 0.00129890 Iteration 41, loss = 0.00124869 Iteration 42, loss = 0.00126157 Iteration 43, loss = 0.00121596 Iteration 44, loss = 0.00109442 Iteration 45, loss = 0.00123548 Iteration 46, loss = 0.00103648 Iteration 47, loss = 0.00096395 Iteration 48, loss = 0.00102136 Iteration 49, loss = 0.00104321 Iteration 50, loss = 0.00092434 Iteration 51, loss = 0.00091966 Iteration 52, loss = 0.00089349 Iteration 53, loss = 0.00084609 Iteration 54, loss = 0.00081107 Iteration 55, loss = 0.00080687 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.24327195 Iteration 2, loss = 0.03980398 Iteration 3, loss = 0.01777879 Iteration 4, loss = 0.01155023 Iteration 5, loss = 0.00869201 Iteration 6, loss = 0.00707398 Iteration 7, loss = 0.00600891 Iteration 8, loss = 0.00531529 Iteration 9, loss = 0.00466438 Iteration 10, loss = 0.00433341 Iteration 11, loss = 0.00388098 Iteration 12, loss = 0.00361334 Iteration 13, loss = 0.00336723 Iteration 14, loss = 0.00309085 Iteration 15, loss = 0.00304007 Iteration 16, loss = 0.00277748 Iteration 17, loss = 0.00263848 Iteration 18, loss = 0.00255215 Iteration 19, loss = 0.00238736 Iteration 20, loss = 0.00228960 Iteration 21, loss = 0.00224054 Iteration 22, loss = 0.00208250 Iteration 23, loss = 0.00200142 Iteration 24, loss = 0.00194416 Iteration 25, loss = 0.00182292 Iteration 26, loss = 0.00182812 Iteration 27, loss = 0.00172306 Iteration 28, loss = 0.00159246 Iteration 29, loss = 0.00164911 Iteration 30, loss = 0.00159555 Iteration 31, loss = 0.00152347 Iteration 32, loss = 0.00149371 Iteration 33, loss = 0.00145162 Iteration 34, loss = 0.00138074 Iteration 35, loss = 0.00125856 Iteration 36, loss = 0.00125131 Iteration 37, loss = 0.00122011 Iteration 38, loss = 0.00124660 Iteration 39, loss = 0.00113444 Iteration 40, loss = 0.00120291 Iteration 41, loss = 0.00107424 Iteration 42, loss = 0.00103715 Iteration 43, loss = 0.00102877 Iteration 44, loss = 0.00097836 Iteration 45, loss = 0.00093620 Iteration 46, loss = 0.00092271 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.22373549 Iteration 2, loss = 0.03666272 Iteration 3, loss = 0.01694985 Iteration 4, loss = 0.01107694 Iteration 5, loss = 0.00836988 Iteration 6, loss = 0.00713605 Iteration 7, loss = 0.00587234 Iteration 8, loss = 0.00511405 Iteration 9, loss = 0.00470959 Iteration 10, loss = 0.00421562 Iteration 11, loss = 0.00376748 Iteration 12, loss = 0.00364589 Iteration 13, loss = 0.00329751 Iteration 14, loss = 0.00301457 Iteration 15, loss = 0.00286681 Iteration 16, loss = 0.00266401 Iteration 17, loss = 0.00256644 Iteration 18, loss = 0.00246359 Iteration 19, loss = 0.00218466 Iteration 20, loss = 0.00233495 Iteration 21, loss = 0.00204054 Iteration 22, loss = 0.00195105 Iteration 23, loss = 0.00181337 Iteration 24, loss = 0.00174721 Iteration 25, loss = 0.00170202 Iteration 26, loss = 0.00160025 Iteration 27, loss = 0.00163543 Iteration 28, loss = 0.00179259 Iteration 29, loss = 0.00147538 Iteration 30, loss = 0.00142710 Iteration 31, loss = 0.00139113 Iteration 32, loss = 0.00140035 Iteration 33, loss = 0.00127251 Iteration 34, loss = 0.00131068 Iteration 35, loss = 0.00136679 Iteration 36, loss = 0.00144443 Iteration 37, loss = 0.00108343 Iteration 38, loss = 0.00109833 Iteration 39, loss = 0.00106235 Iteration 40, loss = 0.00105683 Iteration 41, loss = 0.00098489 Iteration 42, loss = 0.00102690 Iteration 43, loss = 0.00101914 Iteration 44, loss = 0.00090073 Iteration 45, loss = 0.00089000 Iteration 46, loss = 0.00087826 Iteration 47, loss = 0.00088750 Iteration 48, loss = 0.00080093 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.29308361 Iteration 2, loss = 0.04514304 Iteration 3, loss = 0.01984643 Iteration 4, loss = 0.01258524 Iteration 5, loss = 0.00936955 Iteration 6, loss = 0.00768868 Iteration 7, loss = 0.00653312 Iteration 8, loss = 0.00568536 Iteration 9, loss = 0.00515144 Iteration 10, loss = 0.00460654 Iteration 11, loss = 0.00427156 Iteration 12, loss = 0.00396866 Iteration 13, loss = 0.00366819 Iteration 14, loss = 0.00339048 Iteration 15, loss = 0.00323558 Iteration 16, loss = 0.00309884 Iteration 17, loss = 0.00286222 Iteration 18, loss = 0.00272970 Iteration 19, loss = 0.00257327 Iteration 20, loss = 0.00241982 Iteration 21, loss = 0.00238157 Iteration 22, loss = 0.00225963 Iteration 23, loss = 0.00221652 Iteration 24, loss = 0.00214370 Iteration 25, loss = 0.00192632 Iteration 26, loss = 0.00190603 Iteration 27, loss = 0.00178156 Iteration 28, loss = 0.00175942 Iteration 29, loss = 0.00160294 Iteration 30, loss = 0.00165723 Iteration 31, loss = 0.00152262 Iteration 32, loss = 0.00145253 Iteration 33, loss = 0.00140901 Iteration 34, loss = 0.00139083 Iteration 35, loss = 0.00130878 Iteration 36, loss = 0.00125494 Iteration 37, loss = 0.00123014 Iteration 38, loss = 0.00117634 Iteration 39, loss = 0.00144957 Iteration 40, loss = 0.00109117 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.26005017 Iteration 2, loss = 0.04004144 Iteration 3, loss = 0.01789901 Iteration 4, loss = 0.01133630 Iteration 5, loss = 0.00849373 Iteration 6, loss = 0.00702361 Iteration 7, loss = 0.00589574 Iteration 8, loss = 0.00518708 Iteration 9, loss = 0.00468186 Iteration 10, loss = 0.00421110 Iteration 11, loss = 0.00387722 Iteration 12, loss = 0.00358107 Iteration 13, loss = 0.00343021 Iteration 14, loss = 0.00301680 Iteration 15, loss = 0.00296370 Iteration 16, loss = 0.00274965 Iteration 17, loss = 0.00253820 Iteration 18, loss = 0.00256205 Iteration 19, loss = 0.00238266 Iteration 20, loss = 0.00230630 Iteration 21, loss = 0.00220002 Iteration 22, loss = 0.00211556 Iteration 23, loss = 0.00195834 Iteration 24, loss = 0.00193077 Iteration 25, loss = 0.00180135 Iteration 26, loss = 0.00185647 Iteration 27, loss = 0.00176413 Iteration 28, loss = 0.00162582 Iteration 29, loss = 0.00163296 Iteration 30, loss = 0.00154203 Iteration 31, loss = 0.00167225 Iteration 32, loss = 0.00156579 Iteration 33, loss = 0.00153918 Iteration 34, loss = 0.00133968 Iteration 35, loss = 0.00129425 Iteration 36, loss = 0.00122817 Iteration 37, loss = 0.00117648 Iteration 38, loss = 0.00139720 Iteration 39, loss = 0.00140856 Iteration 40, loss = 0.00119614 Iteration 41, loss = 0.00103662 Iteration 42, loss = 0.00103486 Iteration 43, loss = 0.00102467 Iteration 44, loss = 0.00100688 Iteration 45, loss = 0.00095526 Iteration 46, loss = 0.00090778 Iteration 47, loss = 0.00088138 Iteration 48, loss = 0.00085206 Iteration 49, loss = 0.00090239 Iteration 50, loss = 0.00098858 Iteration 51, loss = 0.00082022 Iteration 52, loss = 0.00077932 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.36814882 Iteration 2, loss = 0.10975595 Iteration 3, loss = 0.05077611 Iteration 4, loss = 0.03035197 Iteration 5, loss = 0.02076226 Iteration 6, loss = 0.01563049 Iteration 7, loss = 0.01237589 Iteration 8, loss = 0.01031654 Iteration 9, loss = 0.00862049 Iteration 10, loss = 0.00736367 Iteration 11, loss = 0.00684014 Iteration 12, loss = 0.00629178 Iteration 13, loss = 0.00538881 Iteration 14, loss = 0.00493109 Iteration 15, loss = 0.00493011 Iteration 16, loss = 0.00446761 Iteration 17, loss = 0.00396806 Iteration 18, loss = 0.00398126 Iteration 19, loss = 0.00377345 Iteration 20, loss = 0.00323405 Iteration 21, loss = 0.00304783 Iteration 22, loss = 0.00345670 Iteration 23, loss = 0.00297926 Iteration 24, loss = 0.00275835 Iteration 25, loss = 0.00251313 Iteration 26, loss = 0.00241232 Iteration 27, loss = 0.00232004 Iteration 28, loss = 0.00223062 Iteration 29, loss = 0.00280333 Iteration 30, loss = 0.00210246 Iteration 31, loss = 0.00247841 Iteration 32, loss = 0.00201275 Iteration 33, loss = 0.00188021 Iteration 34, loss = 0.00173840 Iteration 35, loss = 0.00172532 Iteration 36, loss = 0.00162895 Iteration 37, loss = 0.00159955 Iteration 38, loss = 0.00177863 Iteration 39, loss = 0.00187719 Iteration 40, loss = 0.00144685 Iteration 41, loss = 0.00137080 Iteration 42, loss = 0.00130523 Iteration 43, loss = 0.00125632 Iteration 44, loss = 0.00124231 Iteration 45, loss = 0.00119298 Iteration 46, loss = 0.00119102 Iteration 47, loss = 0.00120287 Iteration 48, loss = 0.00110760 Iteration 49, loss = 0.00118017 Iteration 50, loss = 0.00110257 Iteration 51, loss = 0.00118935 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.34438223 Iteration 2, loss = 0.09957195 Iteration 3, loss = 0.04606434 Iteration 4, loss = 0.02769749 Iteration 5, loss = 0.01916708 Iteration 6, loss = 0.01432623 Iteration 7, loss = 0.01137400 Iteration 8, loss = 0.00935868 Iteration 9, loss = 0.00800426 Iteration 10, loss = 0.00684368 Iteration 11, loss = 0.00612783 Iteration 12, loss = 0.00538539 Iteration 13, loss = 0.00485220 Iteration 14, loss = 0.00442330 Iteration 15, loss = 0.00405742 Iteration 16, loss = 0.00375938 Iteration 17, loss = 0.00346939 Iteration 18, loss = 0.00352276 Iteration 19, loss = 0.00307469 Iteration 20, loss = 0.00289984 Iteration 21, loss = 0.00275251 Iteration 22, loss = 0.00275234 Iteration 23, loss = 0.00249722 Iteration 24, loss = 0.00235642 Iteration 25, loss = 0.00229521 Iteration 26, loss = 0.00223057 Iteration 27, loss = 0.00208504 Iteration 28, loss = 0.00209450 Iteration 29, loss = 0.00206289 Iteration 30, loss = 0.00193871 Iteration 31, loss = 0.00182111 Iteration 32, loss = 0.00184983 Iteration 33, loss = 0.00171718 Iteration 34, loss = 0.00173056 Iteration 35, loss = 0.00154595 Iteration 36, loss = 0.00150300 Iteration 37, loss = 0.00145382 Iteration 38, loss = 0.00140148 Iteration 39, loss = 0.00138650 Iteration 40, loss = 0.00134800 Iteration 41, loss = 0.00131938 Iteration 42, loss = 0.00123581 Iteration 43, loss = 0.00120070 Iteration 44, loss = 0.00117077 Iteration 45, loss = 0.00113489 Iteration 46, loss = 0.00110484 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.26146404 Iteration 2, loss = 0.07932006 Iteration 3, loss = 0.03663037 Iteration 4, loss = 0.02138941 Iteration 5, loss = 0.01451148 Iteration 6, loss = 0.01058411 Iteration 7, loss = 0.00815577 Iteration 8, loss = 0.00660247 Iteration 9, loss = 0.00544530 Iteration 10, loss = 0.00461503 Iteration 11, loss = 0.00402706 Iteration 12, loss = 0.00354164 Iteration 13, loss = 0.00322558 Iteration 14, loss = 0.00288850 Iteration 15, loss = 0.00263882 Iteration 16, loss = 0.00243692 Iteration 17, loss = 0.00226179 Iteration 18, loss = 0.00212012 Iteration 19, loss = 0.00200785 Iteration 20, loss = 0.00189856 Iteration 21, loss = 0.00180424 Iteration 22, loss = 0.00171176 Iteration 23, loss = 0.00163445 Iteration 24, loss = 0.00156082 Iteration 25, loss = 0.00151198 Iteration 26, loss = 0.00144360 Iteration 27, loss = 0.00139220 Iteration 28, loss = 0.00133988 Iteration 29, loss = 0.00129014 Iteration 30, loss = 0.00125411 Iteration 31, loss = 0.00120852 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.30553126 Iteration 2, loss = 0.08748797 Iteration 3, loss = 0.04153647 Iteration 4, loss = 0.02553914 Iteration 5, loss = 0.01824658 Iteration 6, loss = 0.01391043 Iteration 7, loss = 0.01106567 Iteration 8, loss = 0.00944760 Iteration 9, loss = 0.00792534 Iteration 10, loss = 0.00684963 Iteration 11, loss = 0.00631159 Iteration 12, loss = 0.00566151 Iteration 13, loss = 0.00540411 Iteration 14, loss = 0.00471738 Iteration 15, loss = 0.00419174 Iteration 16, loss = 0.00390137 Iteration 17, loss = 0.00366426 Iteration 18, loss = 0.00333776 Iteration 19, loss = 0.00314165 Iteration 20, loss = 0.00290679 Iteration 21, loss = 0.00277452 Iteration 22, loss = 0.00275712 Iteration 23, loss = 0.00249778 Iteration 24, loss = 0.00242220 Iteration 25, loss = 0.00226298 Iteration 26, loss = 0.00220700 Iteration 27, loss = 0.00215227 Iteration 28, loss = 0.00195252 Iteration 29, loss = 0.00189605 Iteration 30, loss = 0.00198818 Iteration 31, loss = 0.00200369 Iteration 32, loss = 0.00166479 Iteration 33, loss = 0.00162178 Iteration 34, loss = 0.00158184 Iteration 35, loss = 0.00153473 Iteration 36, loss = 0.00149864 Iteration 37, loss = 0.00148452 Iteration 38, loss = 0.00138445 Iteration 39, loss = 0.00135156 Iteration 40, loss = 0.00132233 Iteration 41, loss = 0.00136539 Iteration 42, loss = 0.00125223 Iteration 43, loss = 0.00125958 Iteration 44, loss = 0.00155710 Iteration 45, loss = 0.00118326 Iteration 46, loss = 0.00111169 Iteration 47, loss = 0.00108088 Iteration 48, loss = 0.00104062 Iteration 49, loss = 0.00103184 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.34805900 Iteration 2, loss = 0.10516435 Iteration 3, loss = 0.04795593 Iteration 4, loss = 0.02815814 Iteration 5, loss = 0.01922484 Iteration 6, loss = 0.01432767 Iteration 7, loss = 0.01140293 Iteration 8, loss = 0.00942499 Iteration 9, loss = 0.00784237 Iteration 10, loss = 0.00680146 Iteration 11, loss = 0.00623711 Iteration 12, loss = 0.00565562 Iteration 13, loss = 0.00488113 Iteration 14, loss = 0.00450642 Iteration 15, loss = 0.00426614 Iteration 16, loss = 0.00385819 Iteration 17, loss = 0.00377293 Iteration 18, loss = 0.00346945 Iteration 19, loss = 0.00335197 Iteration 20, loss = 0.00321914 Iteration 21, loss = 0.00310703 Iteration 22, loss = 0.00283642 Iteration 23, loss = 0.00266580 Iteration 24, loss = 0.00265961 Iteration 25, loss = 0.00251200 Iteration 26, loss = 0.00246851 Iteration 27, loss = 0.00222123 Iteration 28, loss = 0.00230740 Iteration 29, loss = 0.00207130 Iteration 30, loss = 0.00203877 Iteration 31, loss = 0.00196716 Iteration 32, loss = 0.00197744 Iteration 33, loss = 0.00176461 Iteration 34, loss = 0.00177056 Iteration 35, loss = 0.00163195 Iteration 36, loss = 0.00162937 Iteration 37, loss = 0.00170240 Iteration 38, loss = 0.00153083 Iteration 39, loss = 0.00145951 Iteration 40, loss = 0.00148457 Iteration 41, loss = 0.00139845 Iteration 42, loss = 0.00131053 Iteration 43, loss = 0.00130295 Iteration 44, loss = 0.00136807 Iteration 45, loss = 0.00123612 Iteration 46, loss = 0.00121846 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.44300951 Iteration 2, loss = 0.13658446 Iteration 3, loss = 0.06284636 Iteration 4, loss = 0.03602262 Iteration 5, loss = 0.02421082 Iteration 6, loss = 0.01782362 Iteration 7, loss = 0.01381559 Iteration 8, loss = 0.01132164 Iteration 9, loss = 0.00936901 Iteration 10, loss = 0.00806028 Iteration 11, loss = 0.00708352 Iteration 12, loss = 0.00636164 Iteration 13, loss = 0.00572052 Iteration 14, loss = 0.00521516 Iteration 15, loss = 0.00471389 Iteration 16, loss = 0.00437967 Iteration 17, loss = 0.00406517 Iteration 18, loss = 0.00385728 Iteration 19, loss = 0.00364129 Iteration 20, loss = 0.00361075 Iteration 21, loss = 0.00344494 Iteration 22, loss = 0.00314634 Iteration 23, loss = 0.00296689 Iteration 24, loss = 0.00275088 Iteration 25, loss = 0.00269057 Iteration 26, loss = 0.00265861 Iteration 27, loss = 0.00242891 Iteration 28, loss = 0.00249934 Iteration 29, loss = 0.00227448 Iteration 30, loss = 0.00241098 Iteration 31, loss = 0.00225317 Iteration 32, loss = 0.00198959 Iteration 33, loss = 0.00208101 Iteration 34, loss = 0.00200337 Iteration 35, loss = 0.00201916 Iteration 36, loss = 0.00178276 Iteration 37, loss = 0.00173093 Iteration 38, loss = 0.00180668 Iteration 39, loss = 0.00180336 Iteration 40, loss = 0.00159367 Iteration 41, loss = 0.00168415 Iteration 42, loss = 0.00151208 Iteration 43, loss = 0.00160722 Iteration 44, loss = 0.00146734 Iteration 45, loss = 0.00145149 Iteration 46, loss = 0.00168256 Iteration 47, loss = 0.00132772 Iteration 48, loss = 0.00126234 Iteration 49, loss = 0.00135220 Iteration 50, loss = 0.00123107 Iteration 51, loss = 0.00128489 Iteration 52, loss = 0.00116570 Iteration 53, loss = 0.00122760 Iteration 54, loss = 0.00118607 Iteration 55, loss = 0.00112263 Iteration 56, loss = 0.00101298 Iteration 57, loss = 0.00106804 Iteration 58, loss = 0.00101468 Iteration 59, loss = 0.00098974 Iteration 60, loss = 0.00097611 Iteration 61, loss = 0.00093075 Iteration 62, loss = 0.00090075 Iteration 63, loss = 
0.00086149 Iteration 64, loss = 0.00092408 Iteration 65, loss = 0.00086889 Iteration 66, loss = 0.00088014 Iteration 67, loss = 0.00082689 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. Iteration 1, loss = 0.40286665 Iteration 2, loss = 0.11246982 Iteration 3, loss = 0.05176815 Iteration 4, loss = 0.03096574 Iteration 5, loss = 0.02105199 Iteration 6, loss = 0.01556566 Iteration 7, loss = 0.01224459 Iteration 8, loss = 0.00984921 Iteration 9, loss = 0.00818995 Iteration 10, loss = 0.00716307 Iteration 11, loss = 0.00610689 Iteration 12, loss = 0.00557979 Iteration 13, loss = 0.00495014 Iteration 14, loss = 0.00427482 Iteration 15, loss = 0.00423850 Iteration 16, loss = 0.00367760 Iteration 17, loss = 0.00339114 Iteration 18, loss = 0.00388769 Iteration 19, loss = 0.00312562 Iteration 20, loss = 0.00266084 Iteration 21, loss = 0.00263329 Iteration 22, loss = 0.00248757 Iteration 23, loss = 0.00253198 Iteration 24, loss = 0.00220994 Iteration 25, loss = 0.00215435 Iteration 26, loss = 0.00193863 Iteration 27, loss = 0.00182707 Iteration 28, loss = 0.00178634 Iteration 29, loss = 0.00243455 Iteration 30, loss = 0.00259191 Iteration 31, loss = 0.00160165 Iteration 32, loss = 0.00149027 Iteration 33, loss = 0.00165952 Iteration 34, loss = 0.00128166 Iteration 35, loss = 0.00138167 Iteration 36, loss = 0.00124708 Iteration 37, loss = 0.00124556 Iteration 38, loss = 0.00109736 Iteration 39, loss = 0.00104465 Iteration 40, loss = 0.00101470 Iteration 41, loss = 0.00096362 Iteration 42, loss = 0.00094752 Iteration 43, loss = 0.00091692 Iteration 44, loss = 0.00095961 Iteration 45, loss = 0.00102558 Iteration 46, loss = 0.00083541 Iteration 47, loss = 0.00081193 Iteration 48, loss = 0.00079255 Iteration 49, loss = 0.00075564 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.50458973 Iteration 2, loss = 0.15355840 Iteration 3, loss = 0.07097406 Iteration 4, loss = 0.04159184 Iteration 5, loss = 0.02800743 Iteration 6, loss = 0.02054393 Iteration 7, loss = 0.01600048 Iteration 8, loss = 0.01292496 Iteration 9, loss = 0.01087799 Iteration 10, loss = 0.00915832 Iteration 11, loss = 0.00797224 Iteration 12, loss = 0.00729709 Iteration 13, loss = 0.00645529 Iteration 14, loss = 0.00579648 Iteration 15, loss = 0.00522730 Iteration 16, loss = 0.00490330 Iteration 17, loss = 0.00462796 Iteration 18, loss = 0.00415875 Iteration 19, loss = 0.00390875 Iteration 20, loss = 0.00370343 Iteration 21, loss = 0.00359137 Iteration 22, loss = 0.00332776 Iteration 23, loss = 0.00311629 Iteration 24, loss = 0.00293525 Iteration 25, loss = 0.00286119 Iteration 26, loss = 0.00306139 Iteration 27, loss = 0.00254583 Iteration 28, loss = 0.00243710 Iteration 29, loss = 0.00236635 Iteration 30, loss = 0.00226107 Iteration 31, loss = 0.00219015 Iteration 32, loss = 0.00215174 Iteration 33, loss = 0.00206755 Iteration 34, loss = 0.00196256 Iteration 35, loss = 0.00190890 Iteration 36, loss = 0.00182578 Iteration 37, loss = 0.00183723 Iteration 38, loss = 0.00221315 Iteration 39, loss = 0.00169388 Iteration 40, loss = 0.00160710 Iteration 41, loss = 0.00161430 Iteration 42, loss = 0.00163433 Iteration 43, loss = 0.00175969 Iteration 44, loss = 0.00162776 Iteration 45, loss = 0.00139576 Iteration 46, loss = 0.00135354 Iteration 47, loss = 0.00133210 Iteration 48, loss = 0.00128910 Iteration 49, loss = 0.00126001 Iteration 50, loss = 0.00123862 Iteration 51, loss = 0.00120348 Iteration 52, loss = 0.00116801 Iteration 53, loss = 0.00114220 Iteration 54, loss = 0.00110998 Iteration 55, loss = 0.00108343 Iteration 56, loss = 0.00107265 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.35438281 Iteration 2, loss = 0.10905388 Iteration 3, loss = 0.05314703 Iteration 4, loss = 0.03261226 Iteration 5, loss = 0.02255860 Iteration 6, loss = 0.01691857 Iteration 7, loss = 0.01352987 Iteration 8, loss = 0.01097444 Iteration 9, loss = 0.00932007 Iteration 10, loss = 0.00835161 Iteration 11, loss = 0.00720453 Iteration 12, loss = 0.00620409 Iteration 13, loss = 0.00594159 Iteration 14, loss = 0.00591973 Iteration 15, loss = 0.00480465 Iteration 16, loss = 0.00443036 Iteration 17, loss = 0.00441836 Iteration 18, loss = 0.00384919 Iteration 19, loss = 0.00381826 Iteration 20, loss = 0.00335861 Iteration 21, loss = 0.00318465 Iteration 22, loss = 0.00301264 Iteration 23, loss = 0.00301896 Iteration 24, loss = 0.00272216 Iteration 25, loss = 0.00267362 Iteration 26, loss = 0.00251784 Iteration 27, loss = 0.00248039 Iteration 28, loss = 0.00239565 Iteration 29, loss = 0.00229517 Iteration 30, loss = 0.00212530 Iteration 31, loss = 0.00205718 Iteration 32, loss = 0.00196746 Iteration 33, loss = 0.00190270 Iteration 34, loss = 0.00186734 Iteration 35, loss = 0.00176303 Iteration 36, loss = 0.00176135 Iteration 37, loss = 0.00164305 Iteration 38, loss = 0.00159417 Iteration 39, loss = 0.00161932 Iteration 40, loss = 0.00153080 Iteration 41, loss = 0.00142931 Iteration 42, loss = 0.00145758 Iteration 43, loss = 0.00135540 Iteration 44, loss = 0.00133486 Iteration 45, loss = 0.00126870 Iteration 46, loss = 0.00124545 Iteration 47, loss = 0.00120791 Iteration 48, loss = 0.00136800 Iteration 49, loss = 0.00142345 Iteration 50, loss = 0.00137595 Iteration 51, loss = 0.00107842 Iteration 52, loss = 0.00104613 Iteration 53, loss = 0.00101818 Iteration 54, loss = 0.00098723 Iteration 55, loss = 0.00096178 Iteration 56, loss = 0.00094093 Iteration 57, loss = 0.00091738 Iteration 58, loss = 0.00089751 Iteration 59, loss = 0.00087915 Iteration 60, loss = 0.00085037 Iteration 61, loss = 0.00084435 Iteration 62, loss = 0.00082414 Training loss did not 
improve more than tol=0.000100 for 10 consecutive epochs. Stopping. Iteration 1, loss = 0.40536135 Iteration 2, loss = 0.12103586 Iteration 3, loss = 0.05496732 Iteration 4, loss = 0.03257329 Iteration 5, loss = 0.02232249 Iteration 6, loss = 0.01678950 Iteration 7, loss = 0.01362783 Iteration 8, loss = 0.01098833 Iteration 9, loss = 0.00914792 Iteration 10, loss = 0.00799281 Iteration 11, loss = 0.00724284 Iteration 12, loss = 0.00630961 Iteration 13, loss = 0.00568439 Iteration 14, loss = 0.00507565 Iteration 15, loss = 0.00468095 Iteration 16, loss = 0.00412833 Iteration 17, loss = 0.00397220 Iteration 18, loss = 0.00373249 Iteration 19, loss = 0.00339012 Iteration 20, loss = 0.00334033 Iteration 21, loss = 0.00307621 Iteration 22, loss = 0.00297453 Iteration 23, loss = 0.00277613 Iteration 24, loss = 0.00272772 Iteration 25, loss = 0.00255346 Iteration 26, loss = 0.00239931 Iteration 27, loss = 0.00242690 Iteration 28, loss = 0.00238777 Iteration 29, loss = 0.00222967 Iteration 30, loss = 0.00230268 Iteration 31, loss = 0.00211590 Iteration 32, loss = 0.00187564 Iteration 33, loss = 0.00181368 Iteration 34, loss = 0.00177195 Iteration 35, loss = 0.00174551 Iteration 36, loss = 0.00162642 Iteration 37, loss = 0.00161511 Iteration 38, loss = 0.00156985 Iteration 39, loss = 0.00148778 Iteration 40, loss = 0.00144380 Iteration 41, loss = 0.00146786 Iteration 42, loss = 0.00136185 Iteration 43, loss = 0.00131392 Iteration 44, loss = 0.00128070 Iteration 45, loss = 0.00125984 Iteration 46, loss = 0.00121431 Iteration 47, loss = 0.00119131 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.36058243 Iteration 2, loss = 0.10722402 Iteration 3, loss = 0.04977469 Iteration 4, loss = 0.02991533 Iteration 5, loss = 0.02048535 Iteration 6, loss = 0.01533789 Iteration 7, loss = 0.01216245 Iteration 8, loss = 0.00991693 Iteration 9, loss = 0.00850757 Iteration 10, loss = 0.00764157 Iteration 11, loss = 0.00656714 Iteration 12, loss = 0.00580461 Iteration 13, loss = 0.00535148 Iteration 14, loss = 0.00511407 Iteration 15, loss = 0.00441430 Iteration 16, loss = 0.00446084 Iteration 17, loss = 0.00394771 Iteration 18, loss = 0.00372988 Iteration 19, loss = 0.00351796 Iteration 20, loss = 0.00317287 Iteration 21, loss = 0.00302776 Iteration 22, loss = 0.00292608 Iteration 23, loss = 0.00307707 Iteration 24, loss = 0.00260247 Iteration 25, loss = 0.00253467 Iteration 26, loss = 0.00252994 Iteration 27, loss = 0.00230556 Iteration 28, loss = 0.00226128 Iteration 29, loss = 0.00211843 Iteration 30, loss = 0.00213533 Iteration 31, loss = 0.00216291 Iteration 32, loss = 0.00190845 Iteration 33, loss = 0.00186954 Iteration 34, loss = 0.00180332 Iteration 35, loss = 0.00170049 Iteration 36, loss = 0.00170326 Iteration 37, loss = 0.00165749 Iteration 38, loss = 0.00163487 Iteration 39, loss = 0.00155510 Iteration 40, loss = 0.00153138 Iteration 41, loss = 0.00142589 Iteration 42, loss = 0.00138650 Iteration 43, loss = 0.00136824 Iteration 44, loss = 0.00137890 Iteration 45, loss = 0.00126149 Iteration 46, loss = 0.00123072 Iteration 47, loss = 0.00121151 Iteration 48, loss = 0.00117143 Iteration 49, loss = 0.00114374 Iteration 50, loss = 0.00111440 Iteration 51, loss = 0.00106472 Iteration 52, loss = 0.00103973 Iteration 53, loss = 0.00102594 Iteration 54, loss = 0.00100312 Iteration 55, loss = 0.00099295 Iteration 56, loss = 0.00098057 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.40080655 Iteration 2, loss = 0.12547123 Iteration 3, loss = 0.06050936 Iteration 4, loss = 0.03624410 Iteration 5, loss = 0.02471613 Iteration 6, loss = 0.01835996 Iteration 7, loss = 0.01485595 Iteration 8, loss = 0.01191860 Iteration 9, loss = 0.00976046 Iteration 10, loss = 0.00865832 Iteration 11, loss = 0.00743498 Iteration 12, loss = 0.00678684 Iteration 13, loss = 0.00588461 Iteration 14, loss = 0.00533995 Iteration 15, loss = 0.00518889 Iteration 16, loss = 0.00449470 Iteration 17, loss = 0.00408988 Iteration 18, loss = 0.00387913 Iteration 19, loss = 0.00357665 Iteration 20, loss = 0.00344997 Iteration 21, loss = 0.00319929 Iteration 22, loss = 0.00302563 Iteration 23, loss = 0.00297524 Iteration 24, loss = 0.00294734 Iteration 25, loss = 0.00261042 Iteration 26, loss = 0.00248999 Iteration 27, loss = 0.00255020 Iteration 28, loss = 0.00241172 Iteration 29, loss = 0.00217868 Iteration 30, loss = 0.00213988 Iteration 31, loss = 0.00205130 Iteration 32, loss = 0.00195056 Iteration 33, loss = 0.00193025 Iteration 34, loss = 0.00202947 Iteration 35, loss = 0.00176924 Iteration 36, loss = 0.00174634 Iteration 37, loss = 0.00165339 Iteration 38, loss = 0.00157724 Iteration 39, loss = 0.00152514 Iteration 40, loss = 0.00148937 Iteration 41, loss = 0.00143056 Iteration 42, loss = 0.00139832 Iteration 43, loss = 0.00140348 Iteration 44, loss = 0.00140208 Iteration 45, loss = 0.00135367 Iteration 46, loss = 0.00122816 Iteration 47, loss = 0.00118430 Iteration 48, loss = 0.00115342 Iteration 49, loss = 0.00112400 Iteration 50, loss = 0.00110103 Iteration 51, loss = 0.00106590 Iteration 52, loss = 0.00104322 Iteration 53, loss = 0.00102746 Iteration 54, loss = 0.00100116 Iteration 55, loss = 0.00097630 Iteration 56, loss = 0.00095522 Iteration 57, loss = 0.00091691 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.31281279 Iteration 2, loss = 0.09074137 Iteration 3, loss = 0.04286710 Iteration 4, loss = 0.02619307 Iteration 5, loss = 0.01827448 Iteration 6, loss = 0.01374792 Iteration 7, loss = 0.01108547 Iteration 8, loss = 0.00933227 Iteration 9, loss = 0.00768920 Iteration 10, loss = 0.00675460 Iteration 11, loss = 0.00596853 Iteration 12, loss = 0.00526199 Iteration 13, loss = 0.00504603 Iteration 14, loss = 0.00457347 Iteration 15, loss = 0.00405522 Iteration 16, loss = 0.00410149 Iteration 17, loss = 0.00355433 Iteration 18, loss = 0.00321889 Iteration 19, loss = 0.00310168 Iteration 20, loss = 0.00303526 Iteration 21, loss = 0.00272830 Iteration 22, loss = 0.00265071 Iteration 23, loss = 0.00245661 Iteration 24, loss = 0.00228137 Iteration 25, loss = 0.00223556 Iteration 26, loss = 0.00212344 Iteration 27, loss = 0.00214818 Iteration 28, loss = 0.00210542 Iteration 29, loss = 0.00206505 Iteration 30, loss = 0.00178128 Iteration 31, loss = 0.00171269 Iteration 32, loss = 0.00165265 Iteration 33, loss = 0.00161241 Iteration 34, loss = 0.00157069 Iteration 35, loss = 0.00148583 Iteration 36, loss = 0.00151888 Iteration 37, loss = 0.00154227 Iteration 38, loss = 0.00140134 Iteration 39, loss = 0.00130894 Iteration 40, loss = 0.00134575 Iteration 41, loss = 0.00133264 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.53760374 Iteration 2, loss = 0.15911682 Iteration 3, loss = 0.07091470 Iteration 4, loss = 0.04102824 Iteration 5, loss = 0.02763608 Iteration 6, loss = 0.02024100 Iteration 7, loss = 0.01577777 Iteration 8, loss = 0.01264308 Iteration 9, loss = 0.01056189 Iteration 10, loss = 0.00916186 Iteration 11, loss = 0.00799557 Iteration 12, loss = 0.00707050 Iteration 13, loss = 0.00634998 Iteration 14, loss = 0.00571680 Iteration 15, loss = 0.00524341 Iteration 16, loss = 0.00493846 Iteration 17, loss = 0.00469521 Iteration 18, loss = 0.00428745 Iteration 19, loss = 0.00405491 Iteration 20, loss = 0.00373593 Iteration 21, loss = 0.00367697 Iteration 22, loss = 0.00351765 Iteration 23, loss = 0.00331503 Iteration 24, loss = 0.00325944 Iteration 25, loss = 0.00306098 Iteration 26, loss = 0.00283975 Iteration 27, loss = 0.00270386 Iteration 28, loss = 0.00259992 Iteration 29, loss = 0.00260628 Iteration 30, loss = 0.00250957 Iteration 31, loss = 0.00226546 Iteration 32, loss = 0.00222745 Iteration 33, loss = 0.00215328 Iteration 34, loss = 0.00214218 Iteration 35, loss = 0.00200029 Iteration 36, loss = 0.00195176 Iteration 37, loss = 0.00185518 Iteration 38, loss = 0.00180498 Iteration 39, loss = 0.00175578 Iteration 40, loss = 0.00171040 Iteration 41, loss = 0.00166611 Iteration 42, loss = 0.00159958 Iteration 43, loss = 0.00157172 Iteration 44, loss = 0.00151253 Iteration 45, loss = 0.00147543 Iteration 46, loss = 0.00146014 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.65957732 Iteration 2, loss = 0.20744311 Iteration 3, loss = 0.09621142 Iteration 4, loss = 0.05571382 Iteration 5, loss = 0.03695177 Iteration 6, loss = 0.02642209 Iteration 7, loss = 0.02032353 Iteration 8, loss = 0.01628973 Iteration 9, loss = 0.01333072 Iteration 10, loss = 0.01134999 Iteration 11, loss = 0.00978896 Iteration 12, loss = 0.00862886 Iteration 13, loss = 0.00759592 Iteration 14, loss = 0.00688452 Iteration 15, loss = 0.00618674 Iteration 16, loss = 0.00571506 Iteration 17, loss = 0.00521771 Iteration 18, loss = 0.00489054 Iteration 19, loss = 0.00461134 Iteration 20, loss = 0.00425667 Iteration 21, loss = 0.00405458 Iteration 22, loss = 0.00378377 Iteration 23, loss = 0.00356455 Iteration 24, loss = 0.00336591 Iteration 25, loss = 0.00323303 Iteration 26, loss = 0.00315602 Iteration 27, loss = 0.00303226 Iteration 28, loss = 0.00277947 Iteration 29, loss = 0.00267579 Iteration 30, loss = 0.00257127 Iteration 31, loss = 0.00255737 Iteration 32, loss = 0.00271961 Iteration 33, loss = 0.00226566 Iteration 34, loss = 0.00214544 Iteration 35, loss = 0.00210999 Iteration 36, loss = 0.00204897 Iteration 37, loss = 0.00195773 Iteration 38, loss = 0.00186444 Iteration 39, loss = 0.00188352 Iteration 40, loss = 0.00180752 Iteration 41, loss = 0.00183552 Iteration 42, loss = 0.00176902 Iteration 43, loss = 0.00164225 Iteration 44, loss = 0.00156971 Iteration 45, loss = 0.00153071 Iteration 46, loss = 0.00153304 Iteration 47, loss = 0.00144014 Iteration 48, loss = 0.00140043 Iteration 49, loss = 0.00136725 Iteration 50, loss = 0.00138493 Iteration 51, loss = 0.00132848 Iteration 52, loss = 0.00130235 Iteration 53, loss = 0.00126125 Iteration 54, loss = 0.00157335 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.41456323 Iteration 2, loss = 0.12642036 Iteration 3, loss = 0.05874462 Iteration 4, loss = 0.03474419 Iteration 5, loss = 0.02373209 Iteration 6, loss = 0.01744917 Iteration 7, loss = 0.01354236 Iteration 8, loss = 0.01115487 Iteration 9, loss = 0.00920483 Iteration 10, loss = 0.00801327 Iteration 11, loss = 0.00695674 Iteration 12, loss = 0.00615101 Iteration 13, loss = 0.00563977 Iteration 14, loss = 0.00512973 Iteration 15, loss = 0.00475840 Iteration 16, loss = 0.00464229 Iteration 17, loss = 0.00405761 Iteration 18, loss = 0.00386218 Iteration 19, loss = 0.00354806 Iteration 20, loss = 0.00349771 Iteration 21, loss = 0.00325764 Iteration 22, loss = 0.00306552 Iteration 23, loss = 0.00283780 Iteration 24, loss = 0.00301348 Iteration 25, loss = 0.00283730 Iteration 26, loss = 0.00257267 Iteration 27, loss = 0.00239342 Iteration 28, loss = 0.00241176 Iteration 29, loss = 0.00219247 Iteration 30, loss = 0.00208247 Iteration 31, loss = 0.00209334 Iteration 32, loss = 0.00217678 Iteration 33, loss = 0.00189833 Iteration 34, loss = 0.00180138 Iteration 35, loss = 0.00175948 Iteration 36, loss = 0.00175202 Iteration 37, loss = 0.00167545 Iteration 38, loss = 0.00156190 Iteration 39, loss = 0.00151459 Iteration 40, loss = 0.00145244 Iteration 41, loss = 0.00140961 Iteration 42, loss = 0.00139691 Iteration 43, loss = 0.00147873 Iteration 44, loss = 0.00129160 Iteration 45, loss = 0.00126226 Iteration 46, loss = 0.00121045 Iteration 47, loss = 0.00119762 Iteration 48, loss = 0.00119705 Iteration 49, loss = 0.00110279 Iteration 50, loss = 0.00111400 Iteration 51, loss = 0.00107957 Iteration 52, loss = 0.00103134 Iteration 53, loss = 0.00100106 Iteration 54, loss = 0.00097161 Iteration 55, loss = 0.00095168 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.48825049 Iteration 2, loss = 0.15302782 Iteration 3, loss = 0.07192774 Iteration 4, loss = 0.04234616 Iteration 5, loss = 0.02853217 Iteration 6, loss = 0.02073987 Iteration 7, loss = 0.01608898 Iteration 8, loss = 0.01301646 Iteration 9, loss = 0.01078788 Iteration 10, loss = 0.00942633 Iteration 11, loss = 0.00820096 Iteration 12, loss = 0.00712678 Iteration 13, loss = 0.00644549 Iteration 14, loss = 0.00583047 Iteration 15, loss = 0.00546566 Iteration 16, loss = 0.00497833 Iteration 17, loss = 0.00454202 Iteration 18, loss = 0.00421408 Iteration 19, loss = 0.00398290 Iteration 20, loss = 0.00393221 Iteration 21, loss = 0.00356302 Iteration 22, loss = 0.00336601 Iteration 23, loss = 0.00325738 Iteration 24, loss = 0.00322156 Iteration 25, loss = 0.00295280 Iteration 26, loss = 0.00275841 Iteration 27, loss = 0.00274621 Iteration 28, loss = 0.00253704 Iteration 29, loss = 0.00259343 Iteration 30, loss = 0.00237101 Iteration 31, loss = 0.00233661 Iteration 32, loss = 0.00225206 Iteration 33, loss = 0.00227159 Iteration 34, loss = 0.00215890 Iteration 35, loss = 0.00221251 Iteration 36, loss = 0.00193922 Iteration 37, loss = 0.00192978 Iteration 38, loss = 0.00183155 Iteration 39, loss = 0.00171529 Iteration 40, loss = 0.00174161 Iteration 41, loss = 0.00165882 Iteration 42, loss = 0.00161313 Iteration 43, loss = 0.00160154 Iteration 44, loss = 0.00156361 Iteration 45, loss = 0.00149889 Iteration 46, loss = 0.00150210 Iteration 47, loss = 0.00141592 Iteration 48, loss = 0.00135699 Iteration 49, loss = 0.00133433 Iteration 50, loss = 0.00133848 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.55114340 Iteration 2, loss = 0.17304745 Iteration 3, loss = 0.07709489 Iteration 4, loss = 0.04343422 Iteration 5, loss = 0.02865564 Iteration 6, loss = 0.02080700 Iteration 7, loss = 0.01617237 Iteration 8, loss = 0.01298674 Iteration 9, loss = 0.01080965 Iteration 10, loss = 0.00914749 Iteration 11, loss = 0.00788655 Iteration 12, loss = 0.00690470 Iteration 13, loss = 0.00625172 Iteration 14, loss = 0.00592377 Iteration 15, loss = 0.00521417 Iteration 16, loss = 0.00481038 Iteration 17, loss = 0.00458048 Iteration 18, loss = 0.00416392 Iteration 19, loss = 0.00390122 Iteration 20, loss = 0.00376939 Iteration 21, loss = 0.00376561 Iteration 22, loss = 0.00340188 Iteration 23, loss = 0.00331058 Iteration 24, loss = 0.00300030 Iteration 25, loss = 0.00289971 Iteration 26, loss = 0.00273109 Iteration 27, loss = 0.00279273 Iteration 28, loss = 0.00302390 Iteration 29, loss = 0.00273168 Iteration 30, loss = 0.00261711 Iteration 31, loss = 0.00224801 Iteration 32, loss = 0.00217889 Iteration 33, loss = 0.00214045 Iteration 34, loss = 0.00206217 Iteration 35, loss = 0.00231222 Iteration 36, loss = 0.00204085 Iteration 37, loss = 0.00183952 Iteration 38, loss = 0.00177902 Iteration 39, loss = 0.00172863 Iteration 40, loss = 0.00168419 Iteration 41, loss = 0.00161795 Iteration 42, loss = 0.00156423 Iteration 43, loss = 0.00152192 Iteration 44, loss = 0.00149162 Iteration 45, loss = 0.00145607 Iteration 46, loss = 0.00142763 Iteration 47, loss = 0.00138739 Iteration 48, loss = 0.00136519 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.29368812 Iteration 2, loss = 0.08672129 Iteration 3, loss = 0.04204772 Iteration 4, loss = 0.02603598 Iteration 5, loss = 0.01836200 Iteration 6, loss = 0.01398136 Iteration 7, loss = 0.01124741 Iteration 8, loss = 0.00906559 Iteration 9, loss = 0.00757005 Iteration 10, loss = 0.00647572 Iteration 11, loss = 0.00582787 Iteration 12, loss = 0.00510735 Iteration 13, loss = 0.00456907 Iteration 14, loss = 0.00422419 Iteration 15, loss = 0.00391751 Iteration 16, loss = 0.00376555 Iteration 17, loss = 0.00348878 Iteration 18, loss = 0.00310562 Iteration 19, loss = 0.00315835 Iteration 20, loss = 0.00284591 Iteration 21, loss = 0.00263648 Iteration 22, loss = 0.00253620 Iteration 23, loss = 0.00238036 Iteration 24, loss = 0.00238102 Iteration 25, loss = 0.00227258 Iteration 26, loss = 0.00233026 Iteration 27, loss = 0.00194979 Iteration 28, loss = 0.00187937 Iteration 29, loss = 0.00183769 Iteration 30, loss = 0.00175032 Iteration 31, loss = 0.00168253 Iteration 32, loss = 0.00158857 Iteration 33, loss = 0.00153565 Iteration 34, loss = 0.00151026 Iteration 35, loss = 0.00142554 Iteration 36, loss = 0.00136908 Iteration 37, loss = 0.00134481 Iteration 38, loss = 0.00135820 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.39906809 Iteration 2, loss = 0.12038089 Iteration 3, loss = 0.05688609 Iteration 4, loss = 0.03446968 Iteration 5, loss = 0.02384567 Iteration 6, loss = 0.01758775 Iteration 7, loss = 0.01422760 Iteration 8, loss = 0.01137183 Iteration 9, loss = 0.01000022 Iteration 10, loss = 0.00811660 Iteration 11, loss = 0.00735747 Iteration 12, loss = 0.00659104 Iteration 13, loss = 0.00589823 Iteration 14, loss = 0.00548703 Iteration 15, loss = 0.00473018 Iteration 16, loss = 0.00472576 Iteration 17, loss = 0.00410974 Iteration 18, loss = 0.00403030 Iteration 19, loss = 0.00402474 Iteration 20, loss = 0.00347217 Iteration 21, loss = 0.00310215 Iteration 22, loss = 0.00323516 Iteration 23, loss = 0.00278193 Iteration 24, loss = 0.00267838 Iteration 25, loss = 0.00313989 Iteration 26, loss = 0.00260791 Iteration 27, loss = 0.00255545 Iteration 28, loss = 0.00218998 Iteration 29, loss = 0.00225103 Iteration 30, loss = 0.00206939 Iteration 31, loss = 0.00222618 Iteration 32, loss = 0.00191002 Iteration 33, loss = 0.00186737 Iteration 34, loss = 0.00184071 Iteration 35, loss = 0.00173821 Iteration 36, loss = 0.00163944 Iteration 37, loss = 0.00162017 Iteration 38, loss = 0.00157250 Iteration 39, loss = 0.00157532 Iteration 40, loss = 0.00148584 Iteration 41, loss = 0.00139069 Iteration 42, loss = 0.00135928 Iteration 43, loss = 0.00132886 Iteration 44, loss = 0.00126288 Iteration 45, loss = 0.00123745 Iteration 46, loss = 0.00121512 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.41790628 Iteration 2, loss = 0.12378369 Iteration 3, loss = 0.05861225 Iteration 4, loss = 0.03498090 Iteration 5, loss = 0.02404694 Iteration 6, loss = 0.01793578 Iteration 7, loss = 0.01421313 Iteration 8, loss = 0.01170974 Iteration 9, loss = 0.00985564 Iteration 10, loss = 0.00863617 Iteration 11, loss = 0.00755393 Iteration 12, loss = 0.00643608 Iteration 13, loss = 0.00606597 Iteration 14, loss = 0.00541661 Iteration 15, loss = 0.00504208 Iteration 16, loss = 0.00461175 Iteration 17, loss = 0.00424935 Iteration 18, loss = 0.00405369 Iteration 19, loss = 0.00416084 Iteration 20, loss = 0.00352218 Iteration 21, loss = 0.00327998 Iteration 22, loss = 0.00323103 Iteration 23, loss = 0.00327130 Iteration 24, loss = 0.00290966 Iteration 25, loss = 0.00284958 Iteration 26, loss = 0.00261954 Iteration 27, loss = 0.00247343 Iteration 28, loss = 0.00236806 Iteration 29, loss = 0.00250369 Iteration 30, loss = 0.00242148 Iteration 31, loss = 0.00223537 Iteration 32, loss = 0.00219508 Iteration 33, loss = 0.00198908 Iteration 34, loss = 0.00189375 Iteration 35, loss = 0.00185440 Iteration 36, loss = 0.00174939 Iteration 37, loss = 0.00190096 Iteration 38, loss = 0.00166657 Iteration 39, loss = 0.00168119 Iteration 40, loss = 0.00150604 Iteration 41, loss = 0.00155511 Iteration 42, loss = 0.00146949 Iteration 43, loss = 0.00137649 Iteration 44, loss = 0.00142627 Iteration 45, loss = 0.00159893 Iteration 46, loss = 0.00129917 Iteration 47, loss = 0.00123111 Iteration 48, loss = 0.00118302 Iteration 49, loss = 0.00114571 Iteration 50, loss = 0.00110899 Iteration 51, loss = 0.00109513 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.51530748 Iteration 2, loss = 0.16412044 Iteration 3, loss = 0.07536707 Iteration 4, loss = 0.04340753 Iteration 5, loss = 0.02885236 Iteration 6, loss = 0.02080798 Iteration 7, loss = 0.01606450 Iteration 8, loss = 0.01303287 Iteration 9, loss = 0.01078985 Iteration 10, loss = 0.00916437 Iteration 11, loss = 0.00799075 Iteration 12, loss = 0.00710669 Iteration 13, loss = 0.00636687 Iteration 14, loss = 0.00566451 Iteration 15, loss = 0.00547660 Iteration 16, loss = 0.00499479 Iteration 17, loss = 0.00459943 Iteration 18, loss = 0.00453064 Iteration 19, loss = 0.00409921 Iteration 20, loss = 0.00385983 Iteration 21, loss = 0.00387507 Iteration 22, loss = 0.00342853 Iteration 23, loss = 0.00308413 Iteration 24, loss = 0.00293735 Iteration 25, loss = 0.00279075 Iteration 26, loss = 0.00273134 Iteration 27, loss = 0.00265075 Iteration 28, loss = 0.00275643 Iteration 29, loss = 0.00240633 Iteration 30, loss = 0.00233961 Iteration 31, loss = 0.00221710 Iteration 32, loss = 0.00218278 Iteration 33, loss = 0.00206592 Iteration 34, loss = 0.00209242 Iteration 35, loss = 0.00203025 Iteration 36, loss = 0.00187529 Iteration 37, loss = 0.00181588 Iteration 38, loss = 0.00172149 Iteration 39, loss = 0.00175580 Iteration 40, loss = 0.00163134 Iteration 41, loss = 0.00157481 Iteration 42, loss = 0.00150942 Iteration 43, loss = 0.00147556 Iteration 44, loss = 0.00142161 Iteration 45, loss = 0.00139369 Iteration 46, loss = 0.00136729 Iteration 47, loss = 0.00146424 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.36309898 Iteration 2, loss = 0.11795979 Iteration 3, loss = 0.05530499 Iteration 4, loss = 0.03227677 Iteration 5, loss = 0.02130114 Iteration 6, loss = 0.01528913 Iteration 7, loss = 0.01160786 Iteration 8, loss = 0.00915343 Iteration 9, loss = 0.00747202 Iteration 10, loss = 0.00625750 Iteration 11, loss = 0.00535461 Iteration 12, loss = 0.00465883 Iteration 13, loss = 0.00412633 Iteration 14, loss = 0.00371362 Iteration 15, loss = 0.00334247 Iteration 16, loss = 0.00305975 Iteration 17, loss = 0.00282318 Iteration 18, loss = 0.00263507 Iteration 19, loss = 0.00244921 Iteration 20, loss = 0.00230122 Iteration 21, loss = 0.00218060 Iteration 22, loss = 0.00206579 Iteration 23, loss = 0.00196615 Iteration 24, loss = 0.00186775 Iteration 25, loss = 0.00178710 Iteration 26, loss = 0.00171436 Iteration 27, loss = 0.00165215 Iteration 28, loss = 0.00158739 Iteration 29, loss = 0.00153750 Iteration 30, loss = 0.00148377 Iteration 31, loss = 0.00144711 Iteration 32, loss = 0.00138680 Iteration 33, loss = 0.00134012 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.23068231 Iteration 2, loss = 0.07160740 Iteration 3, loss = 0.03476555 Iteration 4, loss = 0.02159312 Iteration 5, loss = 0.01540145 Iteration 6, loss = 0.01198412 Iteration 7, loss = 0.00978626 Iteration 8, loss = 0.00819176 Iteration 9, loss = 0.00687422 Iteration 10, loss = 0.00602942 Iteration 11, loss = 0.00604450 Iteration 12, loss = 0.00485215 Iteration 13, loss = 0.00449346 Iteration 14, loss = 0.00389104 Iteration 15, loss = 0.00455856 Iteration 16, loss = 0.00354478 Iteration 17, loss = 0.00323792 Iteration 18, loss = 0.00302840 Iteration 19, loss = 0.00321148 Iteration 20, loss = 0.00356004 Iteration 21, loss = 0.00273381 Iteration 22, loss = 0.00262575 Iteration 23, loss = 0.00230629 Iteration 24, loss = 0.00238533 Iteration 25, loss = 0.00222497 Iteration 26, loss = 0.00261214 Iteration 27, loss = 0.00209269 Iteration 28, loss = 0.00181092 Iteration 29, loss = 0.00184989 Iteration 30, loss = 0.00182009 Iteration 31, loss = 0.00172747 Iteration 32, loss = 0.00160357 Iteration 33, loss = 0.00167081 Iteration 34, loss = 0.00168677 Iteration 35, loss = 0.00143613 Iteration 36, loss = 0.00134376 Iteration 37, loss = 0.00129554 Iteration 38, loss = 0.00123285 Iteration 39, loss = 0.00122809 Iteration 40, loss = 0.00116591 Iteration 41, loss = 0.00115272 Iteration 42, loss = 0.00110214 Iteration 43, loss = 0.00106353 Iteration 44, loss = 0.00102703 Iteration 45, loss = 0.00101217 Iteration 46, loss = 0.00101695 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.37309430 Iteration 2, loss = 0.10955376 Iteration 3, loss = 0.05208618 Iteration 4, loss = 0.03162776 Iteration 5, loss = 0.02197772 Iteration 6, loss = 0.01677791 Iteration 7, loss = 0.01333767 Iteration 8, loss = 0.01061867 Iteration 9, loss = 0.00888259 Iteration 10, loss = 0.00781675 Iteration 11, loss = 0.00680931 Iteration 12, loss = 0.00597942 Iteration 13, loss = 0.00549813 Iteration 14, loss = 0.00473199 Iteration 15, loss = 0.00431165 Iteration 16, loss = 0.00435781 Iteration 17, loss = 0.00371721 Iteration 18, loss = 0.00359413 Iteration 19, loss = 0.00348455 Iteration 20, loss = 0.00363183 Iteration 21, loss = 0.00324896 Iteration 22, loss = 0.00285377 Iteration 23, loss = 0.00281015 Iteration 24, loss = 0.00262535 Iteration 25, loss = 0.00244478 Iteration 26, loss = 0.00233731 Iteration 27, loss = 0.00221601 Iteration 28, loss = 0.00223433 Iteration 29, loss = 0.00206479 Iteration 30, loss = 0.00192760 Iteration 31, loss = 0.00187601 Iteration 32, loss = 0.00182693 Iteration 33, loss = 0.00178042 Iteration 34, loss = 0.00168182 Iteration 35, loss = 0.00170229 Iteration 36, loss = 0.00157408 Iteration 37, loss = 0.00150457 Iteration 38, loss = 0.00145368 Iteration 39, loss = 0.00141529 Iteration 40, loss = 0.00140333 Iteration 41, loss = 0.00136381 Iteration 42, loss = 0.00128537 Iteration 43, loss = 0.00123984 Iteration 44, loss = 0.00121631 Iteration 45, loss = 0.00116407 Iteration 46, loss = 0.00113995 Iteration 47, loss = 0.00109950 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.51652962 Iteration 2, loss = 0.16628851 Iteration 3, loss = 0.07611923 Iteration 4, loss = 0.04361129 Iteration 5, loss = 0.02863188 Iteration 6, loss = 0.02066939 Iteration 7, loss = 0.01578491 Iteration 8, loss = 0.01270273 Iteration 9, loss = 0.01052338 Iteration 10, loss = 0.00895291 Iteration 11, loss = 0.00784321 Iteration 12, loss = 0.00691673 Iteration 13, loss = 0.00625303 Iteration 14, loss = 0.00583385 Iteration 15, loss = 0.00522931 Iteration 16, loss = 0.00482551 Iteration 17, loss = 0.00449684 Iteration 18, loss = 0.00430753 Iteration 19, loss = 0.00390160 Iteration 20, loss = 0.00366745 Iteration 21, loss = 0.00351295 Iteration 22, loss = 0.00337048 Iteration 23, loss = 0.00309752 Iteration 24, loss = 0.00302272 Iteration 25, loss = 0.00291010 Iteration 26, loss = 0.00277088 Iteration 27, loss = 0.00259020 Iteration 28, loss = 0.00249838 Iteration 29, loss = 0.00245175 Iteration 30, loss = 0.00238214 Iteration 31, loss = 0.00220487 Iteration 32, loss = 0.00219519 Iteration 33, loss = 0.00211149 Iteration 34, loss = 0.00210748 Iteration 35, loss = 0.00218016 Iteration 36, loss = 0.00185948 Iteration 37, loss = 0.00187182 Iteration 38, loss = 0.00177896 Iteration 39, loss = 0.00181178 Iteration 40, loss = 0.00165431 Iteration 41, loss = 0.00173908 Iteration 42, loss = 0.00159974 Iteration 43, loss = 0.00193006 Iteration 44, loss = 0.00199814 Iteration 45, loss = 0.00154366 Iteration 46, loss = 0.00166812 Iteration 47, loss = 0.00138138 Iteration 48, loss = 0.00132569 Iteration 49, loss = 0.00130017 Iteration 50, loss = 0.00126102 Iteration 51, loss = 0.00122140 Iteration 52, loss = 0.00122359 Iteration 53, loss = 0.00119838 Iteration 54, loss = 0.00113930 Iteration 55, loss = 0.00113409 Iteration 56, loss = 0.00110171 Iteration 57, loss = 0.00111754 Iteration 58, loss = 0.00107526 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.35761577 Iteration 2, loss = 0.10549998 Iteration 3, loss = 0.04722666 Iteration 4, loss = 0.02764456 Iteration 5, loss = 0.01854141 Iteration 6, loss = 0.01365260 Iteration 7, loss = 0.01056732 Iteration 8, loss = 0.00862956 Iteration 9, loss = 0.00691647 Iteration 10, loss = 0.00634417 Iteration 11, loss = 0.00513792 Iteration 12, loss = 0.00492655 Iteration 13, loss = 0.00416399 Iteration 14, loss = 0.00358990 Iteration 15, loss = 0.00331711 Iteration 16, loss = 0.00300437 Iteration 17, loss = 0.00281454 Iteration 18, loss = 0.00258208 Iteration 19, loss = 0.00237954 Iteration 20, loss = 0.00239964 Iteration 21, loss = 0.00244094 Iteration 22, loss = 0.00201096 Iteration 23, loss = 0.00195616 Iteration 24, loss = 0.00168472 Iteration 25, loss = 0.00165706 Iteration 26, loss = 0.00162070 Iteration 27, loss = 0.00153497 Iteration 28, loss = 0.00140567 Iteration 29, loss = 0.00133270 Iteration 30, loss = 0.00137350 Iteration 31, loss = 0.00138317 Iteration 32, loss = 0.00132052 Iteration 33, loss = 0.00132565 Iteration 34, loss = 0.00125590 Iteration 35, loss = 0.00129883 Iteration 36, loss = 0.00112385 Iteration 37, loss = 0.00110430 Iteration 38, loss = 0.00096400 Iteration 39, loss = 0.00101907 Iteration 40, loss = 0.00087655 Iteration 41, loss = 0.00088822 Iteration 42, loss = 0.00088451 Iteration 43, loss = 0.00086912 Iteration 44, loss = 0.00077267 Iteration 45, loss = 0.00081165 Iteration 46, loss = 0.00075032 Iteration 47, loss = 0.00075525 Iteration 48, loss = 0.00072815 Iteration 49, loss = 0.00071117 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.50982098 Iteration 2, loss = 0.15400655 Iteration 3, loss = 0.07262273 Iteration 4, loss = 0.04329076 Iteration 5, loss = 0.02951526 Iteration 6, loss = 0.02192676 Iteration 7, loss = 0.01738511 Iteration 8, loss = 0.01427373 Iteration 9, loss = 0.01178341 Iteration 10, loss = 0.01022888 Iteration 11, loss = 0.00902832 Iteration 12, loss = 0.00774048 Iteration 13, loss = 0.00689848 Iteration 14, loss = 0.00636768 Iteration 15, loss = 0.00592057 Iteration 16, loss = 0.00557018 Iteration 17, loss = 0.00491673 Iteration 18, loss = 0.00469154 Iteration 19, loss = 0.00445011 Iteration 20, loss = 0.00418564 Iteration 21, loss = 0.00395148 Iteration 22, loss = 0.00377131 Iteration 23, loss = 0.00406364 Iteration 24, loss = 0.00344670 Iteration 25, loss = 0.00342339 Iteration 26, loss = 0.00328326 Iteration 27, loss = 0.00294885 Iteration 28, loss = 0.00305806 Iteration 29, loss = 0.00278225 Iteration 30, loss = 0.00270929 Iteration 31, loss = 0.00247202 Iteration 32, loss = 0.00244684 Iteration 33, loss = 0.00250471 Iteration 34, loss = 0.00236052 Iteration 35, loss = 0.00223524 Iteration 36, loss = 0.00220913 Iteration 37, loss = 0.00202001 Iteration 38, loss = 0.00197600 Iteration 39, loss = 0.00192326 Iteration 40, loss = 0.00199162 Iteration 41, loss = 0.00182013 Iteration 42, loss = 0.00173598 Iteration 43, loss = 0.00172706 Iteration 44, loss = 0.00179180 Iteration 45, loss = 0.00175434 Iteration 46, loss = 0.00192308 Iteration 47, loss = 0.00161181 Iteration 48, loss = 0.00146343 Iteration 49, loss = 0.00140123 Iteration 50, loss = 0.00136839 Iteration 51, loss = 0.00132068 Iteration 52, loss = 0.00130058 Iteration 53, loss = 0.00129807 Iteration 54, loss = 0.00136268 Iteration 55, loss = 0.00135194 Iteration 56, loss = 0.00120458 Iteration 57, loss = 0.00115852 Iteration 58, loss = 0.00112687 Iteration 59, loss = 0.00108232 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.39732181 Iteration 2, loss = 0.12311265 Iteration 3, loss = 0.05759234 Iteration 4, loss = 0.03388481 Iteration 5, loss = 0.02285622 Iteration 6, loss = 0.01682141 Iteration 7, loss = 0.01305379 Iteration 8, loss = 0.01061809 Iteration 9, loss = 0.00897460 Iteration 10, loss = 0.00771235 Iteration 11, loss = 0.00677076 Iteration 12, loss = 0.00601207 Iteration 13, loss = 0.00553954 Iteration 14, loss = 0.00487095 Iteration 15, loss = 0.00454807 Iteration 16, loss = 0.00426604 Iteration 17, loss = 0.00407811 Iteration 18, loss = 0.00382068 Iteration 19, loss = 0.00351372 Iteration 20, loss = 0.00336385 Iteration 21, loss = 0.00327838 Iteration 22, loss = 0.00318717 Iteration 23, loss = 0.00286356 Iteration 24, loss = 0.00284324 Iteration 25, loss = 0.00276070 Iteration 26, loss = 0.00250592 Iteration 27, loss = 0.00253185 Iteration 28, loss = 0.00251789 Iteration 29, loss = 0.00238502 Iteration 30, loss = 0.00221385 Iteration 31, loss = 0.00226489 Iteration 32, loss = 0.00200755 Iteration 33, loss = 0.00195174 Iteration 34, loss = 0.00187404 Iteration 35, loss = 0.00194195 Iteration 36, loss = 0.00189824 Iteration 37, loss = 0.00169706 Iteration 38, loss = 0.00163491 Iteration 39, loss = 0.00156566 Iteration 40, loss = 0.00151927 Iteration 41, loss = 0.00150219 Iteration 42, loss = 0.00146006 Iteration 43, loss = 0.00140432 Iteration 44, loss = 0.00136885 Iteration 45, loss = 0.00133540 Iteration 46, loss = 0.00129971 Iteration 47, loss = 0.00128832 Iteration 48, loss = 0.00123354 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.37521270 Iteration 2, loss = 0.10882308 Iteration 3, loss = 0.04972168 Iteration 4, loss = 0.02982703 Iteration 5, loss = 0.02054189 Iteration 6, loss = 0.01529679 Iteration 7, loss = 0.01208278 Iteration 8, loss = 0.00994326 Iteration 9, loss = 0.00834629 Iteration 10, loss = 0.00744351 Iteration 11, loss = 0.00641329 Iteration 12, loss = 0.00569041 Iteration 13, loss = 0.00521509 Iteration 14, loss = 0.00484239 Iteration 15, loss = 0.00428795 Iteration 16, loss = 0.00402359 Iteration 17, loss = 0.00371156 Iteration 18, loss = 0.00359994 Iteration 19, loss = 0.00329829 Iteration 20, loss = 0.00309715 Iteration 21, loss = 0.00290802 Iteration 22, loss = 0.00281233 Iteration 23, loss = 0.00264692 Iteration 24, loss = 0.00268245 Iteration 25, loss = 0.00254818 Iteration 26, loss = 0.00228904 Iteration 27, loss = 0.00224711 Iteration 28, loss = 0.00238567 Iteration 29, loss = 0.00217987 Iteration 30, loss = 0.00201337 Iteration 31, loss = 0.00188017 Iteration 32, loss = 0.00184180 Iteration 33, loss = 0.00180180 Iteration 34, loss = 0.00172209 Iteration 35, loss = 0.00169568 Iteration 36, loss = 0.00160738 Iteration 37, loss = 0.00154054 Iteration 38, loss = 0.00149483 Iteration 39, loss = 0.00144786 Iteration 40, loss = 0.00140461 Iteration 41, loss = 0.00138701 Iteration 42, loss = 0.00132801 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.30333965 Iteration 2, loss = 0.09127064 Iteration 3, loss = 0.04323134 Iteration 4, loss = 0.02621405 Iteration 5, loss = 0.01819179 Iteration 6, loss = 0.01438016 Iteration 7, loss = 0.01074930 Iteration 8, loss = 0.00953824 Iteration 9, loss = 0.00758080 Iteration 10, loss = 0.00661746 Iteration 11, loss = 0.00591435 Iteration 12, loss = 0.00522819 Iteration 13, loss = 0.00465829 Iteration 14, loss = 0.00417558 Iteration 15, loss = 0.00428818 Iteration 16, loss = 0.00386579 Iteration 17, loss = 0.00342951 Iteration 18, loss = 0.00329896 Iteration 19, loss = 0.00318726 Iteration 20, loss = 0.00292403 Iteration 21, loss = 0.00282077 Iteration 22, loss = 0.00249270 Iteration 23, loss = 0.00234493 Iteration 24, loss = 0.00226229 Iteration 25, loss = 0.00206719 Iteration 26, loss = 0.00219132 Iteration 27, loss = 0.00215763 Iteration 28, loss = 0.00187840 Iteration 29, loss = 0.00192308 Iteration 30, loss = 0.00196768 Iteration 31, loss = 0.00194015 Iteration 32, loss = 0.00175168 Iteration 33, loss = 0.00166546 Iteration 34, loss = 0.00159552 Iteration 35, loss = 0.00144910 Iteration 36, loss = 0.00141580 Iteration 37, loss = 0.00136383 Iteration 38, loss = 0.00137001 Iteration 39, loss = 0.00126402 Iteration 40, loss = 0.00128714 Iteration 41, loss = 0.00135561 Iteration 42, loss = 0.00117736 Iteration 43, loss = 0.00114373 Iteration 44, loss = 0.00113311 Iteration 45, loss = 0.00114205 Iteration 46, loss = 0.00128224 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.43161317 Iteration 2, loss = 0.12883322 Iteration 3, loss = 0.06044352 Iteration 4, loss = 0.03627028 Iteration 5, loss = 0.02485141 Iteration 6, loss = 0.01865495 Iteration 7, loss = 0.01449191 Iteration 8, loss = 0.01192947 Iteration 9, loss = 0.00971512 Iteration 10, loss = 0.00856296 Iteration 11, loss = 0.00761563 Iteration 12, loss = 0.00704447 Iteration 13, loss = 0.00629373 Iteration 14, loss = 0.00530033 Iteration 15, loss = 0.00496817 Iteration 16, loss = 0.00458388 Iteration 17, loss = 0.00408988 Iteration 18, loss = 0.00415599 Iteration 19, loss = 0.00381395 Iteration 20, loss = 0.00338847 Iteration 21, loss = 0.00330873 Iteration 22, loss = 0.00310596 Iteration 23, loss = 0.00293404 Iteration 24, loss = 0.00276094 Iteration 25, loss = 0.00263897 Iteration 26, loss = 0.00254496 Iteration 27, loss = 0.00241735 Iteration 28, loss = 0.00235713 Iteration 29, loss = 0.00220148 Iteration 30, loss = 0.00213053 Iteration 31, loss = 0.00206484 Iteration 32, loss = 0.00194767 Iteration 33, loss = 0.00189429 Iteration 34, loss = 0.00184586 Iteration 35, loss = 0.00180147 Iteration 36, loss = 0.00176664 Iteration 37, loss = 0.00163885 Iteration 38, loss = 0.00161698 Iteration 39, loss = 0.00155141 Iteration 40, loss = 0.00148746 Iteration 41, loss = 0.00159385 Iteration 42, loss = 0.00162500 Iteration 43, loss = 0.00139967 Iteration 44, loss = 0.00142728 Iteration 45, loss = 0.00131950 Iteration 46, loss = 0.00127427 Iteration 47, loss = 0.00119344 Iteration 48, loss = 0.00116975 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.38568552 Iteration 2, loss = 0.12050430 Iteration 3, loss = 0.05679546 Iteration 4, loss = 0.03272948 Iteration 5, loss = 0.02220289 Iteration 6, loss = 0.01634029 Iteration 7, loss = 0.01283353 Iteration 8, loss = 0.01035520 Iteration 9, loss = 0.00883885 Iteration 10, loss = 0.00756242 Iteration 11, loss = 0.00668353 Iteration 12, loss = 0.00639818 Iteration 13, loss = 0.00539052 Iteration 14, loss = 0.00515654 Iteration 15, loss = 0.00443843 Iteration 16, loss = 0.00413762 Iteration 17, loss = 0.00380043 Iteration 18, loss = 0.00361808 Iteration 19, loss = 0.00341783 Iteration 20, loss = 0.00334030 Iteration 21, loss = 0.00348618 Iteration 22, loss = 0.00314803 Iteration 23, loss = 0.00278459 Iteration 24, loss = 0.00272185 Iteration 25, loss = 0.00259123 Iteration 26, loss = 0.00246517 Iteration 27, loss = 0.00251879 Iteration 28, loss = 0.00248768 Iteration 29, loss = 0.00213462 Iteration 30, loss = 0.00210873 Iteration 31, loss = 0.00207266 Iteration 32, loss = 0.00199103 Iteration 33, loss = 0.00201283 Iteration 34, loss = 0.00213043 Iteration 35, loss = 0.00194260 Iteration 36, loss = 0.00183019 Iteration 37, loss = 0.00182695 Iteration 38, loss = 0.00168426 Iteration 39, loss = 0.00152857 Iteration 40, loss = 0.00147006 Iteration 41, loss = 0.00145908 Iteration 42, loss = 0.00136493 Iteration 43, loss = 0.00132970 Iteration 44, loss = 0.00128853 Iteration 45, loss = 0.00125412 Iteration 46, loss = 0.00120915 Iteration 47, loss = 0.00121020 Iteration 48, loss = 0.00115409 Iteration 49, loss = 0.00112196 Iteration 50, loss = 0.00109271 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.34665472 Iteration 2, loss = 0.10249175 Iteration 3, loss = 0.04871039 Iteration 4, loss = 0.02984487 Iteration 5, loss = 0.02053446 Iteration 6, loss = 0.01541991 Iteration 7, loss = 0.01268127 Iteration 8, loss = 0.01036948 Iteration 9, loss = 0.00848155 Iteration 10, loss = 0.00756394 Iteration 11, loss = 0.00681189 Iteration 12, loss = 0.00591289 Iteration 13, loss = 0.00537839 Iteration 14, loss = 0.00525500 Iteration 15, loss = 0.00476707 Iteration 16, loss = 0.00421155 Iteration 17, loss = 0.00390315 Iteration 18, loss = 0.00373883 Iteration 19, loss = 0.00377786 Iteration 20, loss = 0.00353619 Iteration 21, loss = 0.00313136 Iteration 22, loss = 0.00308660 Iteration 23, loss = 0.00286061 Iteration 24, loss = 0.00269563 Iteration 25, loss = 0.00262428 Iteration 26, loss = 0.00253311 Iteration 27, loss = 0.00243360 Iteration 28, loss = 0.00224421 Iteration 29, loss = 0.00230690 Iteration 30, loss = 0.00212423 Iteration 31, loss = 0.00202141 Iteration 32, loss = 0.00195950 Iteration 33, loss = 0.00189480 Iteration 34, loss = 0.00190080 Iteration 35, loss = 0.00169370 Iteration 36, loss = 0.00173310 Iteration 37, loss = 0.00161495 Iteration 38, loss = 0.00164059 Iteration 39, loss = 0.00154709 Iteration 40, loss = 0.00148760 Iteration 41, loss = 0.00149286 Iteration 42, loss = 0.00135823 Iteration 43, loss = 0.00132789 Iteration 44, loss = 0.00132598 Iteration 45, loss = 0.00128699 Iteration 46, loss = 0.00122301 Iteration 47, loss = 0.00121078 Iteration 48, loss = 0.00117275 Iteration 49, loss = 0.00128319 Iteration 50, loss = 0.00112046 Iteration 51, loss = 0.00106863 Iteration 52, loss = 0.00104083 Iteration 53, loss = 0.00101305 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.31292814 Iteration 2, loss = 0.09797330 Iteration 3, loss = 0.04704036 Iteration 4, loss = 0.02801263 Iteration 5, loss = 0.01927511 Iteration 6, loss = 0.01434487 Iteration 7, loss = 0.01142755 Iteration 8, loss = 0.00961274 Iteration 9, loss = 0.00794903 Iteration 10, loss = 0.00698547 Iteration 11, loss = 0.00603090 Iteration 12, loss = 0.00543428 Iteration 13, loss = 0.00489895 Iteration 14, loss = 0.00464467 Iteration 15, loss = 0.00411647 Iteration 16, loss = 0.00386033 Iteration 17, loss = 0.00373108 Iteration 18, loss = 0.00332518 Iteration 19, loss = 0.00306808 Iteration 20, loss = 0.00304712 Iteration 21, loss = 0.00281050 Iteration 22, loss = 0.00266827 Iteration 23, loss = 0.00250334 Iteration 24, loss = 0.00237840 Iteration 25, loss = 0.00241800 Iteration 26, loss = 0.00220077 Iteration 27, loss = 0.00211037 Iteration 28, loss = 0.00204921 Iteration 29, loss = 0.00202120 Iteration 30, loss = 0.00188826 Iteration 31, loss = 0.00187648 Iteration 32, loss = 0.00172567 Iteration 33, loss = 0.00173916 Iteration 34, loss = 0.00161734 Iteration 35, loss = 0.00159909 Iteration 36, loss = 0.00152148 Iteration 37, loss = 0.00146178 Iteration 38, loss = 0.00154103 Iteration 39, loss = 0.00140608 Iteration 40, loss = 0.00136380 Iteration 41, loss = 0.00130942 Iteration 42, loss = 0.00129792 Iteration 43, loss = 0.00139708 Iteration 44, loss = 0.00140185 Iteration 45, loss = 0.00120963 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.37534944 Iteration 2, loss = 0.11468496 Iteration 3, loss = 0.05275112 Iteration 4, loss = 0.03068436 Iteration 5, loss = 0.02075840 Iteration 6, loss = 0.01537387 Iteration 7, loss = 0.01196584 Iteration 8, loss = 0.00998373 Iteration 9, loss = 0.00842683 Iteration 10, loss = 0.00746986 Iteration 11, loss = 0.00665201 Iteration 12, loss = 0.00560265 Iteration 13, loss = 0.00567780 Iteration 14, loss = 0.00487515 Iteration 15, loss = 0.00446395 Iteration 16, loss = 0.00439044 Iteration 17, loss = 0.00427801 Iteration 18, loss = 0.00378965 Iteration 19, loss = 0.00346670 Iteration 20, loss = 0.00339432 Iteration 21, loss = 0.00320910 Iteration 22, loss = 0.00308852 Iteration 23, loss = 0.00284144 Iteration 24, loss = 0.00275091 Iteration 25, loss = 0.00263838 Iteration 26, loss = 0.00254669 Iteration 27, loss = 0.00248148 Iteration 28, loss = 0.00243535 Iteration 29, loss = 0.00216749 Iteration 30, loss = 0.00212682 Iteration 31, loss = 0.00226631 Iteration 32, loss = 0.00220628 Iteration 33, loss = 0.00190933 Iteration 34, loss = 0.00187291 Iteration 35, loss = 0.00175895 Iteration 36, loss = 0.00177715 Iteration 37, loss = 0.00181467 Iteration 38, loss = 0.00166432 Iteration 39, loss = 0.00156070 Iteration 40, loss = 0.00152289 Iteration 41, loss = 0.00146520 Iteration 42, loss = 0.00139030 Iteration 43, loss = 0.00138556 Iteration 44, loss = 0.00159541 Iteration 45, loss = 0.00143950 Iteration 46, loss = 0.00125900 Iteration 47, loss = 0.00124306 Iteration 48, loss = 0.00117172 Iteration 49, loss = 0.00114133 Iteration 50, loss = 0.00111080 Iteration 51, loss = 0.00107252 Iteration 52, loss = 0.00103303 Iteration 53, loss = 0.00104267 Iteration 54, loss = 0.00099075 Iteration 55, loss = 0.00095664 Iteration 56, loss = 0.00092814 Iteration 57, loss = 0.00091887 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.13086255 Iteration 2, loss = 0.04170011 Iteration 3, loss = 0.02040256 Iteration 4, loss = 0.01274585 Iteration 5, loss = 0.00932677 Iteration 6, loss = 0.00719289 Iteration 7, loss = 0.00606372 Iteration 8, loss = 0.00546956 Iteration 9, loss = 0.00458532 Iteration 10, loss = 0.00402309 Iteration 11, loss = 0.00366743 Iteration 12, loss = 0.00332507 Iteration 13, loss = 0.00317108 Iteration 14, loss = 0.00289457 Iteration 15, loss = 0.00269585 Iteration 16, loss = 0.00262339 Iteration 17, loss = 0.00234413 Iteration 18, loss = 0.00219806 Iteration 19, loss = 0.00218057 Iteration 20, loss = 0.00207933 Iteration 21, loss = 0.00203369 Iteration 22, loss = 0.00191445 Iteration 23, loss = 0.00185184 Iteration 24, loss = 0.00167465 Iteration 25, loss = 0.00158947 Iteration 26, loss = 0.00167670 Iteration 27, loss = 0.00144150 Iteration 28, loss = 0.00144653 Iteration 29, loss = 0.00140266 Iteration 30, loss = 0.00133939 Iteration 31, loss = 0.00134202 Iteration 32, loss = 0.00115574 Iteration 33, loss = 0.00124765 Iteration 34, loss = 0.00110521 Iteration 35, loss = 0.00109677 Iteration 36, loss = 0.00117947 Iteration 37, loss = 0.00103926 Iteration 38, loss = 0.00109920 Iteration 39, loss = 0.00094777 Iteration 40, loss = 0.00089877 Iteration 41, loss = 0.00090704 Iteration 42, loss = 0.00086429 Iteration 43, loss = 0.00087573 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.35616144 Iteration 2, loss = 0.10666123 Iteration 3, loss = 0.05150616 Iteration 4, loss = 0.03170027 Iteration 5, loss = 0.02201073 Iteration 6, loss = 0.01644027 Iteration 7, loss = 0.01317454 Iteration 8, loss = 0.01063160 Iteration 9, loss = 0.00939474 Iteration 10, loss = 0.00807815 Iteration 11, loss = 0.00699673 Iteration 12, loss = 0.00613850 Iteration 13, loss = 0.00557775 Iteration 14, loss = 0.00523988 Iteration 15, loss = 0.00470771 Iteration 16, loss = 0.00439868 Iteration 17, loss = 0.00419075 Iteration 18, loss = 0.00414237 Iteration 19, loss = 0.00359586 Iteration 20, loss = 0.00345483 Iteration 21, loss = 0.00334682 Iteration 22, loss = 0.00300948 Iteration 23, loss = 0.00300882 Iteration 24, loss = 0.00278789 Iteration 25, loss = 0.00286852 Iteration 26, loss = 0.00265143 Iteration 27, loss = 0.00247341 Iteration 28, loss = 0.00258686 Iteration 29, loss = 0.00243719 Iteration 30, loss = 0.00225111 Iteration 31, loss = 0.00207359 Iteration 32, loss = 0.00203797 Iteration 33, loss = 0.00197946 Iteration 34, loss = 0.00185786 Iteration 35, loss = 0.00203933 Iteration 36, loss = 0.00179661 Iteration 37, loss = 0.00175094 Iteration 38, loss = 0.00166859 Iteration 39, loss = 0.00162795 Iteration 40, loss = 0.00173389 Iteration 41, loss = 0.00155511 Iteration 42, loss = 0.00142592 Iteration 43, loss = 0.00140267 Iteration 44, loss = 0.00138045 Iteration 45, loss = 0.00132472 Iteration 46, loss = 0.00127706 Iteration 47, loss = 0.00125811 Iteration 48, loss = 0.00128099 Iteration 49, loss = 0.00131056 Iteration 50, loss = 0.00121101 Iteration 51, loss = 0.00111367 Iteration 52, loss = 0.00108345 Iteration 53, loss = 0.00106333 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.39037897 Iteration 2, loss = 0.11069662 Iteration 3, loss = 0.05132058 Iteration 4, loss = 0.03124078 Iteration 5, loss = 0.02150205 Iteration 6, loss = 0.01607327 Iteration 7, loss = 0.01316998 Iteration 8, loss = 0.01071894 Iteration 9, loss = 0.00928145 Iteration 10, loss = 0.00754691 Iteration 11, loss = 0.00672012 Iteration 12, loss = 0.00610304 Iteration 13, loss = 0.00573441 Iteration 14, loss = 0.00495364 Iteration 15, loss = 0.00510966 Iteration 16, loss = 0.00421151 Iteration 17, loss = 0.00435126 Iteration 18, loss = 0.00387523 Iteration 19, loss = 0.00361906 Iteration 20, loss = 0.00325970 Iteration 21, loss = 0.00316735 Iteration 22, loss = 0.00311668 Iteration 23, loss = 0.00291536 Iteration 24, loss = 0.00302266 Iteration 25, loss = 0.00254548 Iteration 26, loss = 0.00244559 Iteration 27, loss = 0.00285853 Iteration 28, loss = 0.00266749 Iteration 29, loss = 0.00235241 Iteration 30, loss = 0.00221216 Iteration 31, loss = 0.00201137 Iteration 32, loss = 0.00207106 Iteration 33, loss = 0.00189725 Iteration 34, loss = 0.00185868 Iteration 35, loss = 0.00183780 Iteration 36, loss = 0.00168344 Iteration 37, loss = 0.00163790 Iteration 38, loss = 0.00159324 Iteration 39, loss = 0.00151751 Iteration 40, loss = 0.00151668 Iteration 41, loss = 0.00156965 Iteration 42, loss = 0.00178049 Iteration 43, loss = 0.00176631 Iteration 44, loss = 0.00153870 Iteration 45, loss = 0.00150119 Iteration 46, loss = 0.00127782 Iteration 47, loss = 0.00123553 Iteration 48, loss = 0.00118636 Iteration 49, loss = 0.00121250 Iteration 50, loss = 0.00113315 Iteration 51, loss = 0.00110919 Iteration 52, loss = 0.00108098 Iteration 53, loss = 0.00105984 Iteration 54, loss = 0.00103222 Iteration 55, loss = 0.00100483 Iteration 56, loss = 0.00098262 Iteration 57, loss = 0.00096513 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.37438982 Iteration 2, loss = 0.10691015 Iteration 3, loss = 0.05234931 Iteration 4, loss = 0.03242092 Iteration 5, loss = 0.02275699 Iteration 6, loss = 0.01723058 Iteration 7, loss = 0.01361477 Iteration 8, loss = 0.01109147 Iteration 9, loss = 0.00956442 Iteration 10, loss = 0.00809739 Iteration 11, loss = 0.00743104 Iteration 12, loss = 0.00644401 Iteration 13, loss = 0.00566960 Iteration 14, loss = 0.00517480 Iteration 15, loss = 0.00478480 Iteration 16, loss = 0.00484447 Iteration 17, loss = 0.00428747 Iteration 18, loss = 0.00389672 Iteration 19, loss = 0.00368816 Iteration 20, loss = 0.00365754 Iteration 21, loss = 0.00335614 Iteration 22, loss = 0.00309975 Iteration 23, loss = 0.00301162 Iteration 24, loss = 0.00285139 Iteration 25, loss = 0.00275611 Iteration 26, loss = 0.00250033 Iteration 27, loss = 0.00242422 Iteration 28, loss = 0.00235618 Iteration 29, loss = 0.00221580 Iteration 30, loss = 0.00212011 Iteration 31, loss = 0.00206427 Iteration 32, loss = 0.00202010 Iteration 33, loss = 0.00238404 Iteration 34, loss = 0.00231860 Iteration 35, loss = 0.00180828 Iteration 36, loss = 0.00167979 Iteration 37, loss = 0.00163341 Iteration 38, loss = 0.00156574 Iteration 39, loss = 0.00152120 Iteration 40, loss = 0.00148445 Iteration 41, loss = 0.00152519 Iteration 42, loss = 0.00150834 Iteration 43, loss = 0.00140986 Iteration 44, loss = 0.00129513 Iteration 45, loss = 0.00124490 Iteration 46, loss = 0.00121039 Iteration 47, loss = 0.00118558 Iteration 48, loss = 0.00116069 Iteration 49, loss = 0.00112395 Iteration 50, loss = 0.00108123 Iteration 51, loss = 0.00104891 Iteration 52, loss = 0.00102277 Iteration 53, loss = 0.00102928 Iteration 54, loss = 0.00096774 Iteration 55, loss = 0.00093952 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping.
| Set | ROC AUC | Balanced Accuracy |
|---|---|---|
| Train Set | 0.9193 ± 0.1326 | 0.8478 ± 0.1960 |
| Test Set | 0.9384 ± 0.1684 | 0.9000 ± 0.2000 |
Train Set ========================================================================================== Iteration 1, loss = 0.26040303 Iteration 2, loss = 0.04473810 Iteration 3, loss = 0.01991561 Iteration 4, loss = 0.01271541 Iteration 5, loss = 0.00936311 Iteration 6, loss = 0.00749187 Iteration 7, loss = 0.00632846 Iteration 8, loss = 0.00551009 Iteration 9, loss = 0.00484746 Iteration 10, loss = 0.00453240 Iteration 11, loss = 0.00411870 Iteration 12, loss = 0.00382299 Iteration 13, loss = 0.00354427 Iteration 14, loss = 0.00332676 Iteration 15, loss = 0.00314106 Iteration 16, loss = 0.00288362 Iteration 17, loss = 0.00291895 Iteration 18, loss = 0.00267202 Iteration 19, loss = 0.00252205 Iteration 20, loss = 0.00241150 Iteration 21, loss = 0.00240166 Iteration 22, loss = 0.00216614 Iteration 23, loss = 0.00207121 Iteration 24, loss = 0.00204725 Iteration 25, loss = 0.00193372 Iteration 26, loss = 0.00188435 Iteration 27, loss = 0.00179656 Iteration 28, loss = 0.00170883 Iteration 29, loss = 0.00169658 Iteration 30, loss = 0.00158452 Iteration 31, loss = 0.00153585 Iteration 32, loss = 0.00150256 Iteration 33, loss = 0.00143084 Iteration 34, loss = 0.00140293 Iteration 35, loss = 0.00139495 Iteration 36, loss = 0.00131874 Iteration 37, loss = 0.00130927 Iteration 38, loss = 0.00128761 Iteration 39, loss = 0.00119980 Iteration 40, loss = 0.00113740 Iteration 41, loss = 0.00117145 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.23861754 Iteration 2, loss = 0.04083172 Iteration 3, loss = 0.01874540 Iteration 4, loss = 0.01194708 Iteration 5, loss = 0.00895941 Iteration 6, loss = 0.00715707 Iteration 7, loss = 0.00615421 Iteration 8, loss = 0.00540814 Iteration 9, loss = 0.00480028 Iteration 10, loss = 0.00448895 Iteration 11, loss = 0.00390825 Iteration 12, loss = 0.00376481 Iteration 13, loss = 0.00352378 Iteration 14, loss = 0.00321789 Iteration 15, loss = 0.00311521 Iteration 16, loss = 0.00301088 Iteration 17, loss = 0.00285834 Iteration 18, loss = 0.00265569 Iteration 19, loss = 0.00263248 Iteration 20, loss = 0.00243057 Iteration 21, loss = 0.00244471 Iteration 22, loss = 0.00223545 Iteration 23, loss = 0.00223272 Iteration 24, loss = 0.00204006 Iteration 25, loss = 0.00200273 Iteration 26, loss = 0.00185391 Iteration 27, loss = 0.00175707 Iteration 28, loss = 0.00183359 Iteration 29, loss = 0.00167485 Iteration 30, loss = 0.00175561 Iteration 31, loss = 0.00149122 Iteration 32, loss = 0.00148183 Iteration 33, loss = 0.00150350 Iteration 34, loss = 0.00150163 Iteration 35, loss = 0.00139199 Iteration 36, loss = 0.00128620 Iteration 37, loss = 0.00131320 Iteration 38, loss = 0.00121363 Iteration 39, loss = 0.00116215 Iteration 40, loss = 0.00110668 Iteration 41, loss = 0.00111385 Iteration 42, loss = 0.00103909 Iteration 43, loss = 0.00108183 Iteration 44, loss = 0.00101834 Iteration 45, loss = 0.00108459 Iteration 46, loss = 0.00097092 Iteration 47, loss = 0.00090373 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.23427989 Iteration 2, loss = 0.03909752 Iteration 3, loss = 0.01878109 Iteration 4, loss = 0.01260246 Iteration 5, loss = 0.00936589 Iteration 6, loss = 0.00783115 Iteration 7, loss = 0.00677752 Iteration 8, loss = 0.00580865 Iteration 9, loss = 0.00512368 Iteration 10, loss = 0.00485446 Iteration 11, loss = 0.00434506 Iteration 12, loss = 0.00411127 Iteration 13, loss = 0.00383073 Iteration 14, loss = 0.00347913 Iteration 15, loss = 0.00344403 Iteration 16, loss = 0.00314034 Iteration 17, loss = 0.00286209 Iteration 18, loss = 0.00278498 Iteration 19, loss = 0.00262499 Iteration 20, loss = 0.00262180 Iteration 21, loss = 0.00249278 Iteration 22, loss = 0.00237500 Iteration 23, loss = 0.00228295 Iteration 24, loss = 0.00209135 Iteration 25, loss = 0.00193953 Iteration 26, loss = 0.00195858 Iteration 27, loss = 0.00194059 Iteration 28, loss = 0.00177783 Iteration 29, loss = 0.00178071 Iteration 30, loss = 0.00163076 Iteration 31, loss = 0.00176217 Iteration 32, loss = 0.00156825 Iteration 33, loss = 0.00151062 Iteration 34, loss = 0.00145712 Iteration 35, loss = 0.00143863 Iteration 36, loss = 0.00143827 Iteration 37, loss = 0.00133216 Iteration 38, loss = 0.00126642 Iteration 39, loss = 0.00129484 Iteration 40, loss = 0.00116656 Iteration 41, loss = 0.00128548 Iteration 42, loss = 0.00113565 Iteration 43, loss = 0.00108329 Iteration 44, loss = 0.00106681 Iteration 45, loss = 0.00099433 Iteration 46, loss = 0.00104834 Iteration 47, loss = 0.00096211 Iteration 48, loss = 0.00092780 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.35420830 Iteration 2, loss = 0.05350313 Iteration 3, loss = 0.02321795 Iteration 4, loss = 0.01433166 Iteration 5, loss = 0.01034766 Iteration 6, loss = 0.00824634 Iteration 7, loss = 0.00690042 Iteration 8, loss = 0.00602657 Iteration 9, loss = 0.00532516 Iteration 10, loss = 0.00481030 Iteration 11, loss = 0.00435863 Iteration 12, loss = 0.00396425 Iteration 13, loss = 0.00369811 Iteration 14, loss = 0.00343884 Iteration 15, loss = 0.00309005 Iteration 16, loss = 0.00289200 Iteration 17, loss = 0.00271881 Iteration 18, loss = 0.00259350 Iteration 19, loss = 0.00232872 Iteration 20, loss = 0.00231992 Iteration 21, loss = 0.00210570 Iteration 22, loss = 0.00198097 Iteration 23, loss = 0.00188301 Iteration 24, loss = 0.00180654 Iteration 25, loss = 0.00175324 Iteration 26, loss = 0.00176164 Iteration 27, loss = 0.00170620 Iteration 28, loss = 0.00148240 Iteration 29, loss = 0.00141018 Iteration 30, loss = 0.00135295 Iteration 31, loss = 0.00132616 Iteration 32, loss = 0.00123050 Iteration 33, loss = 0.00117226 Iteration 34, loss = 0.00114238 Iteration 35, loss = 0.00110855 Iteration 36, loss = 0.00110669 Iteration 37, loss = 0.00099994 Iteration 38, loss = 0.00101166 Iteration 39, loss = 0.00097884 Iteration 40, loss = 0.00095237 Iteration 41, loss = 0.00089912 Iteration 42, loss = 0.00085718 Iteration 43, loss = 0.00094637 Iteration 44, loss = 0.00083283 Iteration 45, loss = 0.00079866 Iteration 46, loss = 0.00078151 Iteration 47, loss = 0.00078260 Iteration 48, loss = 0.00074529 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.14379704 Iteration 2, loss = 0.02539327 Iteration 3, loss = 0.01313462 Iteration 4, loss = 0.00926153 Iteration 5, loss = 0.00729351 Iteration 6, loss = 0.00613286 Iteration 7, loss = 0.00534531 Iteration 8, loss = 0.00481202 Iteration 9, loss = 0.00444930 Iteration 10, loss = 0.00406617 Iteration 11, loss = 0.00365260 Iteration 12, loss = 0.00336168 Iteration 13, loss = 0.00306252 Iteration 14, loss = 0.00284424 Iteration 15, loss = 0.00272956 Iteration 16, loss = 0.00251675 Iteration 17, loss = 0.00234067 Iteration 18, loss = 0.00231949 Iteration 19, loss = 0.00224156 Iteration 20, loss = 0.00208487 Iteration 21, loss = 0.00201530 Iteration 22, loss = 0.00196213 Iteration 23, loss = 0.00191886 Iteration 24, loss = 0.00167546 Iteration 25, loss = 0.00165249 Iteration 26, loss = 0.00177833 Iteration 27, loss = 0.00159637 Iteration 28, loss = 0.00140292 Iteration 29, loss = 0.00136200 Iteration 30, loss = 0.00128149 Iteration 31, loss = 0.00125303 Iteration 32, loss = 0.00124424 Iteration 33, loss = 0.00116217 Iteration 34, loss = 0.00114567 Iteration 35, loss = 0.00112681 Iteration 36, loss = 0.00105258 Iteration 37, loss = 0.00104903 Iteration 38, loss = 0.00097741 Iteration 39, loss = 0.00097474 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.31215511 Iteration 2, loss = 0.05388564 Iteration 3, loss = 0.02407728 Iteration 4, loss = 0.01487611 Iteration 5, loss = 0.01104088 Iteration 6, loss = 0.00873267 Iteration 7, loss = 0.00732591 Iteration 8, loss = 0.00644352 Iteration 9, loss = 0.00570291 Iteration 10, loss = 0.00515595 Iteration 11, loss = 0.00465571 Iteration 12, loss = 0.00431720 Iteration 13, loss = 0.00406420 Iteration 14, loss = 0.00357835 Iteration 15, loss = 0.00350006 Iteration 16, loss = 0.00316416 Iteration 17, loss = 0.00316007 Iteration 18, loss = 0.00285043 Iteration 19, loss = 0.00269168 Iteration 20, loss = 0.00255971 Iteration 21, loss = 0.00237803 Iteration 22, loss = 0.00236568 Iteration 23, loss = 0.00216232 Iteration 24, loss = 0.00212114 Iteration 25, loss = 0.00198021 Iteration 26, loss = 0.00196604 Iteration 27, loss = 0.00185499 Iteration 28, loss = 0.00194964 Iteration 29, loss = 0.00167946 Iteration 30, loss = 0.00173323 Iteration 31, loss = 0.00164310 Iteration 32, loss = 0.00151194 Iteration 33, loss = 0.00137592 Iteration 34, loss = 0.00143968 Iteration 35, loss = 0.00137675 Iteration 36, loss = 0.00130338 Iteration 37, loss = 0.00122664 Iteration 38, loss = 0.00149604 Iteration 39, loss = 0.00115136 Iteration 40, loss = 0.00112638 Iteration 41, loss = 0.00108620 Iteration 42, loss = 0.00110715 Iteration 43, loss = 0.00112867 Iteration 44, loss = 0.00101979 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.30205802 Iteration 2, loss = 0.04640689 Iteration 3, loss = 0.02019505 Iteration 4, loss = 0.01252912 Iteration 5, loss = 0.00921781 Iteration 6, loss = 0.00731315 Iteration 7, loss = 0.00615292 Iteration 8, loss = 0.00539340 Iteration 9, loss = 0.00473052 Iteration 10, loss = 0.00442047 Iteration 11, loss = 0.00393743 Iteration 12, loss = 0.00362285 Iteration 13, loss = 0.00343632 Iteration 14, loss = 0.00308194 Iteration 15, loss = 0.00303372 Iteration 16, loss = 0.00277037 Iteration 17, loss = 0.00268003 Iteration 18, loss = 0.00254747 Iteration 19, loss = 0.00238025 Iteration 20, loss = 0.00224314 Iteration 21, loss = 0.00218021 Iteration 22, loss = 0.00212409 Iteration 23, loss = 0.00191685 Iteration 24, loss = 0.00190513 Iteration 25, loss = 0.00179750 Iteration 26, loss = 0.00184965 Iteration 27, loss = 0.00167051 Iteration 28, loss = 0.00165132 Iteration 29, loss = 0.00157213 Iteration 30, loss = 0.00150516 Iteration 31, loss = 0.00149182 Iteration 32, loss = 0.00143793 Iteration 33, loss = 0.00132654 Iteration 34, loss = 0.00136835 Iteration 35, loss = 0.00137474 Iteration 36, loss = 0.00126155 Iteration 37, loss = 0.00119850 Iteration 38, loss = 0.00113984 Iteration 39, loss = 0.00111250 Iteration 40, loss = 0.00106113 Iteration 41, loss = 0.00104581 Iteration 42, loss = 0.00102777 Iteration 43, loss = 0.00098594 Iteration 44, loss = 0.00096739 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.18932458 Iteration 2, loss = 0.03376875 Iteration 3, loss = 0.01646374 Iteration 4, loss = 0.01103388 Iteration 5, loss = 0.00848614 Iteration 6, loss = 0.00694208 Iteration 7, loss = 0.00601453 Iteration 8, loss = 0.00528348 Iteration 9, loss = 0.00483978 Iteration 10, loss = 0.00444299 Iteration 11, loss = 0.00398597 Iteration 12, loss = 0.00367132 Iteration 13, loss = 0.00341675 Iteration 14, loss = 0.00311407 Iteration 15, loss = 0.00294079 Iteration 16, loss = 0.00271470 Iteration 17, loss = 0.00251004 Iteration 18, loss = 0.00248971 Iteration 19, loss = 0.00228825 Iteration 20, loss = 0.00215013 Iteration 21, loss = 0.00210182 Iteration 22, loss = 0.00192879 Iteration 23, loss = 0.00182757 Iteration 24, loss = 0.00181910 Iteration 25, loss = 0.00181448 Iteration 26, loss = 0.00200026 Iteration 27, loss = 0.00158498 Iteration 28, loss = 0.00152152 Iteration 29, loss = 0.00146196 Iteration 30, loss = 0.00143337 Iteration 31, loss = 0.00138845 Iteration 32, loss = 0.00132316 Iteration 33, loss = 0.00130052 Iteration 34, loss = 0.00137898 Iteration 35, loss = 0.00119958 Iteration 36, loss = 0.00111804 Iteration 37, loss = 0.00110319 Iteration 38, loss = 0.00105518 Iteration 39, loss = 0.00103493 Iteration 40, loss = 0.00101354 Iteration 41, loss = 0.00099832 Iteration 42, loss = 0.00095871 Iteration 43, loss = 0.00094677 Iteration 44, loss = 0.00124826 Iteration 45, loss = 0.00087727 Iteration 46, loss = 0.00083687 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.33111315 Iteration 2, loss = 0.05405517 Iteration 3, loss = 0.02278525 Iteration 4, loss = 0.01397578 Iteration 5, loss = 0.01027654 Iteration 6, loss = 0.00819986 Iteration 7, loss = 0.00687233 Iteration 8, loss = 0.00598212 Iteration 9, loss = 0.00533450 Iteration 10, loss = 0.00475985 Iteration 11, loss = 0.00438063 Iteration 12, loss = 0.00408355 Iteration 13, loss = 0.00373982 Iteration 14, loss = 0.00353952 Iteration 15, loss = 0.00333036 Iteration 16, loss = 0.00312064 Iteration 17, loss = 0.00301389 Iteration 18, loss = 0.00281575 Iteration 19, loss = 0.00272444 Iteration 20, loss = 0.00264458 Iteration 21, loss = 0.00244718 Iteration 22, loss = 0.00242503 Iteration 23, loss = 0.00232876 Iteration 24, loss = 0.00219474 Iteration 25, loss = 0.00215749 Iteration 26, loss = 0.00205644 Iteration 27, loss = 0.00197310 Iteration 28, loss = 0.00189677 Iteration 29, loss = 0.00184455 Iteration 30, loss = 0.00182569 Iteration 31, loss = 0.00165630 Iteration 32, loss = 0.00162862 Iteration 33, loss = 0.00157777 Iteration 34, loss = 0.00155145 Iteration 35, loss = 0.00146323 Iteration 36, loss = 0.00141824 Iteration 37, loss = 0.00134793 Iteration 38, loss = 0.00142766 Iteration 39, loss = 0.00134595 Iteration 40, loss = 0.00127933 Iteration 41, loss = 0.00121665 Iteration 42, loss = 0.00118942 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.30125700 Iteration 2, loss = 0.04793800 Iteration 3, loss = 0.02140183 Iteration 4, loss = 0.01361641 Iteration 5, loss = 0.01009746 Iteration 6, loss = 0.00825902 Iteration 7, loss = 0.00697787 Iteration 8, loss = 0.00623922 Iteration 9, loss = 0.00544406 Iteration 10, loss = 0.00498316 Iteration 11, loss = 0.00456631 Iteration 12, loss = 0.00421429 Iteration 13, loss = 0.00394368 Iteration 14, loss = 0.00370596 Iteration 15, loss = 0.00341763 Iteration 16, loss = 0.00330326 Iteration 17, loss = 0.00312309 Iteration 18, loss = 0.00297790 Iteration 19, loss = 0.00276745 Iteration 20, loss = 0.00254642 Iteration 21, loss = 0.00249755 Iteration 22, loss = 0.00239719 Iteration 23, loss = 0.00237989 Iteration 24, loss = 0.00217684 Iteration 25, loss = 0.00207346 Iteration 26, loss = 0.00197287 Iteration 27, loss = 0.00189857 Iteration 28, loss = 0.00179747 Iteration 29, loss = 0.00178127 Iteration 30, loss = 0.00176714 Iteration 31, loss = 0.00174455 Iteration 32, loss = 0.00157103 Iteration 33, loss = 0.00154944 Iteration 34, loss = 0.00151020 Iteration 35, loss = 0.00144088 Iteration 36, loss = 0.00145580 Iteration 37, loss = 0.00162138 Iteration 38, loss = 0.00138616 Iteration 39, loss = 0.00128051 Iteration 40, loss = 0.00125444 Iteration 41, loss = 0.00145386 Iteration 42, loss = 0.00141041 Iteration 43, loss = 0.00121882 Iteration 44, loss = 0.00114096 Iteration 45, loss = 0.00110176 Iteration 46, loss = 0.00110501 Iteration 47, loss = 0.00108407 Iteration 48, loss = 0.00102647 Iteration 49, loss = 0.00100597 Iteration 50, loss = 0.00100418 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.17406334 Iteration 2, loss = 0.02890694 Iteration 3, loss = 0.01460723 Iteration 4, loss = 0.00988496 Iteration 5, loss = 0.00765855 Iteration 6, loss = 0.00642703 Iteration 7, loss = 0.00548330 Iteration 8, loss = 0.00509837 Iteration 9, loss = 0.00444010 Iteration 10, loss = 0.00414392 Iteration 11, loss = 0.00368982 Iteration 12, loss = 0.00352456 Iteration 13, loss = 0.00327824 Iteration 14, loss = 0.00310972 Iteration 15, loss = 0.00275846 Iteration 16, loss = 0.00255078 Iteration 17, loss = 0.00238843 Iteration 18, loss = 0.00240267 Iteration 19, loss = 0.00222044 Iteration 20, loss = 0.00210995 Iteration 21, loss = 0.00211896 Iteration 22, loss = 0.00196786 Iteration 23, loss = 0.00179066 Iteration 24, loss = 0.00184717 Iteration 25, loss = 0.00162000 Iteration 26, loss = 0.00167455 Iteration 27, loss = 0.00153537 Iteration 28, loss = 0.00145295 Iteration 29, loss = 0.00175250 Iteration 30, loss = 0.00161700 Iteration 31, loss = 0.00133703 Iteration 32, loss = 0.00126625 Iteration 33, loss = 0.00123282 Iteration 34, loss = 0.00118707 Iteration 35, loss = 0.00113676 Iteration 36, loss = 0.00111975 Iteration 37, loss = 0.00103222 Iteration 38, loss = 0.00107708 Iteration 39, loss = 0.00107867 Iteration 40, loss = 0.00094645 Iteration 41, loss = 0.00093141 Iteration 42, loss = 0.00087365 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.29170922 Iteration 2, loss = 0.04691221 Iteration 3, loss = 0.02029776 Iteration 4, loss = 0.01264006 Iteration 5, loss = 0.00928236 Iteration 6, loss = 0.00754367 Iteration 7, loss = 0.00622803 Iteration 8, loss = 0.00539709 Iteration 9, loss = 0.00475725 Iteration 10, loss = 0.00430141 Iteration 11, loss = 0.00398651 Iteration 12, loss = 0.00360165 Iteration 13, loss = 0.00330838 Iteration 14, loss = 0.00311872 Iteration 15, loss = 0.00290971 Iteration 16, loss = 0.00294028 Iteration 17, loss = 0.00264386 Iteration 18, loss = 0.00258302 Iteration 19, loss = 0.00238138 Iteration 20, loss = 0.00227154 Iteration 21, loss = 0.00216176 Iteration 22, loss = 0.00203767 Iteration 23, loss = 0.00204494 Iteration 24, loss = 0.00185990 Iteration 25, loss = 0.00182227 Iteration 26, loss = 0.00175235 Iteration 27, loss = 0.00174804 Iteration 28, loss = 0.00165572 Iteration 29, loss = 0.00158351 Iteration 30, loss = 0.00152666 Iteration 31, loss = 0.00143365 Iteration 32, loss = 0.00152595 Iteration 33, loss = 0.00144344 Iteration 34, loss = 0.00135040 Iteration 35, loss = 0.00147201 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.29492704 Iteration 2, loss = 0.04733597 Iteration 3, loss = 0.02078652 Iteration 4, loss = 0.01326540 Iteration 5, loss = 0.00974311 Iteration 6, loss = 0.00791568 Iteration 7, loss = 0.00659425 Iteration 8, loss = 0.00583376 Iteration 9, loss = 0.00513936 Iteration 10, loss = 0.00459943 Iteration 11, loss = 0.00443097 Iteration 12, loss = 0.00401450 Iteration 13, loss = 0.00362371 Iteration 14, loss = 0.00346552 Iteration 15, loss = 0.00331468 Iteration 16, loss = 0.00314089 Iteration 17, loss = 0.00297391 Iteration 18, loss = 0.00282335 Iteration 19, loss = 0.00262633 Iteration 20, loss = 0.00249588 Iteration 21, loss = 0.00237761 Iteration 22, loss = 0.00232883 Iteration 23, loss = 0.00235950 Iteration 24, loss = 0.00212323 Iteration 25, loss = 0.00201502 Iteration 26, loss = 0.00198145 Iteration 27, loss = 0.00186283 Iteration 28, loss = 0.00180176 Iteration 29, loss = 0.00179960 Iteration 30, loss = 0.00178123 Iteration 31, loss = 0.00160195 Iteration 32, loss = 0.00158049 Iteration 33, loss = 0.00147459 Iteration 34, loss = 0.00159539 Iteration 35, loss = 0.00146920 Iteration 36, loss = 0.00134275 Iteration 37, loss = 0.00128822 Iteration 38, loss = 0.00132750 Iteration 39, loss = 0.00122274 Iteration 40, loss = 0.00126406 Iteration 41, loss = 0.00122067 Iteration 42, loss = 0.00111214 Iteration 43, loss = 0.00107648 Iteration 44, loss = 0.00124116 Iteration 45, loss = 0.00108006 Iteration 46, loss = 0.00100041 Iteration 47, loss = 0.00098017 Iteration 48, loss = 0.00096176 Iteration 49, loss = 0.00097801 Iteration 50, loss = 0.00091526 Iteration 51, loss = 0.00088871 Iteration 52, loss = 0.00084544 Iteration 53, loss = 0.00081981 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.10306164 Iteration 2, loss = 0.01884140 Iteration 3, loss = 0.01014497 Iteration 4, loss = 0.00713672 Iteration 5, loss = 0.00554327 Iteration 6, loss = 0.00485305 Iteration 7, loss = 0.00405208 Iteration 8, loss = 0.00357997 Iteration 9, loss = 0.00322193 Iteration 10, loss = 0.00289996 Iteration 11, loss = 0.00265761 Iteration 12, loss = 0.00243362 Iteration 13, loss = 0.00220973 Iteration 14, loss = 0.00211593 Iteration 15, loss = 0.00195206 Iteration 16, loss = 0.00195164 Iteration 17, loss = 0.00168292 Iteration 18, loss = 0.00167476 Iteration 19, loss = 0.00157166 Iteration 20, loss = 0.00147619 Iteration 21, loss = 0.00137468 Iteration 22, loss = 0.00132353 Iteration 23, loss = 0.00131362 Iteration 24, loss = 0.00124752 Iteration 25, loss = 0.00127365 Iteration 26, loss = 0.00105348 Iteration 27, loss = 0.00105618 Iteration 28, loss = 0.00096915 Iteration 29, loss = 0.00105473 Iteration 30, loss = 0.00093285 Iteration 31, loss = 0.00106259 Iteration 32, loss = 0.00087922 Iteration 33, loss = 0.00085103 Iteration 34, loss = 0.00084360 Iteration 35, loss = 0.00086358 Iteration 36, loss = 0.00070948 Iteration 37, loss = 0.00073336 Iteration 38, loss = 0.00072127 Iteration 39, loss = 0.00071247 Iteration 40, loss = 0.00066908 Iteration 41, loss = 0.00070567 Iteration 42, loss = 0.00060384 Iteration 43, loss = 0.00060054 Iteration 44, loss = 0.00058940 Iteration 45, loss = 0.00056155 Iteration 46, loss = 0.00057673 Iteration 47, loss = 0.00054425 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.22003305 Iteration 2, loss = 0.03660825 Iteration 3, loss = 0.01715149 Iteration 4, loss = 0.01118139 Iteration 5, loss = 0.00846132 Iteration 6, loss = 0.00680246 Iteration 7, loss = 0.00590473 Iteration 8, loss = 0.00509946 Iteration 9, loss = 0.00465902 Iteration 10, loss = 0.00429967 Iteration 11, loss = 0.00389145 Iteration 12, loss = 0.00364629 Iteration 13, loss = 0.00334144 Iteration 14, loss = 0.00307315 Iteration 15, loss = 0.00293715 Iteration 16, loss = 0.00279660 Iteration 17, loss = 0.00263073 Iteration 18, loss = 0.00246696 Iteration 19, loss = 0.00241521 Iteration 20, loss = 0.00222766 Iteration 21, loss = 0.00218573 Iteration 22, loss = 0.00222245 Iteration 23, loss = 0.00196199 Iteration 24, loss = 0.00193695 Iteration 25, loss = 0.00185496 Iteration 26, loss = 0.00181712 Iteration 27, loss = 0.00159277 Iteration 28, loss = 0.00159564 Iteration 29, loss = 0.00155792 Iteration 30, loss = 0.00144336 Iteration 31, loss = 0.00141447 Iteration 32, loss = 0.00137170 Iteration 33, loss = 0.00135475 Iteration 34, loss = 0.00125328 Iteration 35, loss = 0.00129061 Iteration 36, loss = 0.00119417 Iteration 37, loss = 0.00118974 Iteration 38, loss = 0.00109839 Iteration 39, loss = 0.00115544 Iteration 40, loss = 0.00102744 Iteration 41, loss = 0.00101690 Iteration 42, loss = 0.00105148 Iteration 43, loss = 0.00096554 Iteration 44, loss = 0.00089542 Iteration 45, loss = 0.00096628 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.18554606 Iteration 2, loss = 0.02986242 Iteration 3, loss = 0.01421830 Iteration 4, loss = 0.00938650 Iteration 5, loss = 0.00711860 Iteration 6, loss = 0.00573720 Iteration 7, loss = 0.00495215 Iteration 8, loss = 0.00423450 Iteration 9, loss = 0.00389985 Iteration 10, loss = 0.00346795 Iteration 11, loss = 0.00311058 Iteration 12, loss = 0.00296484 Iteration 13, loss = 0.00258441 Iteration 14, loss = 0.00244526 Iteration 15, loss = 0.00227027 Iteration 16, loss = 0.00213347 Iteration 17, loss = 0.00213092 Iteration 18, loss = 0.00186362 Iteration 19, loss = 0.00178099 Iteration 20, loss = 0.00167952 Iteration 21, loss = 0.00155983 Iteration 22, loss = 0.00158780 Iteration 23, loss = 0.00143772 Iteration 24, loss = 0.00134775 Iteration 25, loss = 0.00137501 Iteration 26, loss = 0.00129834 Iteration 27, loss = 0.00139435 Iteration 28, loss = 0.00113279 Iteration 29, loss = 0.00114267 Iteration 30, loss = 0.00113115 Iteration 31, loss = 0.00112652 Iteration 32, loss = 0.00102055 Iteration 33, loss = 0.00098252 Iteration 34, loss = 0.00098439 Iteration 35, loss = 0.00091056 Iteration 36, loss = 0.00107245 Iteration 37, loss = 0.00113776 Iteration 38, loss = 0.00095703 Iteration 39, loss = 0.00080739 Iteration 40, loss = 0.00079007 Iteration 41, loss = 0.00081077 Iteration 42, loss = 0.00074729 Iteration 43, loss = 0.00076408 Iteration 44, loss = 0.00071338 Iteration 45, loss = 0.00075987 Iteration 46, loss = 0.00067494 Iteration 47, loss = 0.00065577 Iteration 48, loss = 0.00065017 Iteration 49, loss = 0.00063124 Iteration 50, loss = 0.00059716 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.18951596 Iteration 2, loss = 0.03295571 Iteration 3, loss = 0.01629055 Iteration 4, loss = 0.01095258 Iteration 5, loss = 0.00838736 Iteration 6, loss = 0.00700860 Iteration 7, loss = 0.00605591 Iteration 8, loss = 0.00529727 Iteration 9, loss = 0.00477043 Iteration 10, loss = 0.00430349 Iteration 11, loss = 0.00393323 Iteration 12, loss = 0.00373205 Iteration 13, loss = 0.00346127 Iteration 14, loss = 0.00315281 Iteration 15, loss = 0.00292012 Iteration 16, loss = 0.00276302 Iteration 17, loss = 0.00252760 Iteration 18, loss = 0.00241422 Iteration 19, loss = 0.00235335 Iteration 20, loss = 0.00226892 Iteration 21, loss = 0.00210583 Iteration 22, loss = 0.00199230 Iteration 23, loss = 0.00193931 Iteration 24, loss = 0.00204500 Iteration 25, loss = 0.00186231 Iteration 26, loss = 0.00174046 Iteration 27, loss = 0.00176711 Iteration 28, loss = 0.00155714 Iteration 29, loss = 0.00152454 Iteration 30, loss = 0.00150236 Iteration 31, loss = 0.00149198 Iteration 32, loss = 0.00139962 Iteration 33, loss = 0.00140005 Iteration 34, loss = 0.00148928 Iteration 35, loss = 0.00127617 Iteration 36, loss = 0.00120343 Iteration 37, loss = 0.00118234 Iteration 38, loss = 0.00113987 Iteration 39, loss = 0.00122191 Iteration 40, loss = 0.00108896 Iteration 41, loss = 0.00103314 Iteration 42, loss = 0.00116529 Iteration 43, loss = 0.00107645 Iteration 44, loss = 0.00107121 Iteration 45, loss = 0.00093189 Iteration 46, loss = 0.00089663 Iteration 47, loss = 0.00088066 Iteration 48, loss = 0.00089841 Iteration 49, loss = 0.00085345 Iteration 50, loss = 0.00081008 Iteration 51, loss = 0.00080933 Iteration 52, loss = 0.00079472 Iteration 53, loss = 0.00075345 Iteration 54, loss = 0.00082846 Iteration 55, loss = 0.00088774 Iteration 56, loss = 0.00069183 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.19039497 Iteration 2, loss = 0.03348822 Iteration 3, loss = 0.01583706 Iteration 4, loss = 0.01030749 Iteration 5, loss = 0.00799488 Iteration 6, loss = 0.00655836 Iteration 7, loss = 0.00545337 Iteration 8, loss = 0.00484857 Iteration 9, loss = 0.00433531 Iteration 10, loss = 0.00377128 Iteration 11, loss = 0.00335164 Iteration 12, loss = 0.00324167 Iteration 13, loss = 0.00295021 Iteration 14, loss = 0.00270726 Iteration 15, loss = 0.00253615 Iteration 16, loss = 0.00237724 Iteration 17, loss = 0.00234978 Iteration 18, loss = 0.00208657 Iteration 19, loss = 0.00199264 Iteration 20, loss = 0.00187800 Iteration 21, loss = 0.00193392 Iteration 22, loss = 0.00176819 Iteration 23, loss = 0.00171704 Iteration 24, loss = 0.00165455 Iteration 25, loss = 0.00158744 Iteration 26, loss = 0.00147086 Iteration 27, loss = 0.00144621 Iteration 28, loss = 0.00135742 Iteration 29, loss = 0.00149796 Iteration 30, loss = 0.00137544 Iteration 31, loss = 0.00124278 Iteration 32, loss = 0.00118880 Iteration 33, loss = 0.00110834 Iteration 34, loss = 0.00121534 Iteration 35, loss = 0.00114789 Iteration 36, loss = 0.00105878 Iteration 37, loss = 0.00105184 Iteration 38, loss = 0.00098430 Iteration 39, loss = 0.00093841 Iteration 40, loss = 0.00103188 Iteration 41, loss = 0.00087006 Iteration 42, loss = 0.00095537 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.37115008 Iteration 2, loss = 0.05860650 Iteration 3, loss = 0.02436882 Iteration 4, loss = 0.01467491 Iteration 5, loss = 0.01052377 Iteration 6, loss = 0.00823571 Iteration 7, loss = 0.00685453 Iteration 8, loss = 0.00594923 Iteration 9, loss = 0.00526619 Iteration 10, loss = 0.00469367 Iteration 11, loss = 0.00428969 Iteration 12, loss = 0.00386784 Iteration 13, loss = 0.00355982 Iteration 14, loss = 0.00334441 Iteration 15, loss = 0.00311678 Iteration 16, loss = 0.00278128 Iteration 17, loss = 0.00275829 Iteration 18, loss = 0.00244668 Iteration 19, loss = 0.00231704 Iteration 20, loss = 0.00222809 Iteration 21, loss = 0.00213775 Iteration 22, loss = 0.00196691 Iteration 23, loss = 0.00185062 Iteration 24, loss = 0.00184448 Iteration 25, loss = 0.00172050 Iteration 26, loss = 0.00164289 Iteration 27, loss = 0.00158376 Iteration 28, loss = 0.00146084 Iteration 29, loss = 0.00136764 Iteration 30, loss = 0.00131023 Iteration 31, loss = 0.00138597 Iteration 32, loss = 0.00124413 Iteration 33, loss = 0.00116635 Iteration 34, loss = 0.00117957 Iteration 35, loss = 0.00121804 Iteration 36, loss = 0.00111266 Iteration 37, loss = 0.00100435 Iteration 38, loss = 0.00119978 Iteration 39, loss = 0.00100402 Iteration 40, loss = 0.00095674 Iteration 41, loss = 0.00092549 Iteration 42, loss = 0.00088986 Iteration 43, loss = 0.00086798 Iteration 44, loss = 0.00097641 Iteration 45, loss = 0.00085523 Iteration 46, loss = 0.00081403 Iteration 47, loss = 0.00078280 Iteration 48, loss = 0.00075312 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.34077085 Iteration 2, loss = 0.05168912 Iteration 3, loss = 0.02188939 Iteration 4, loss = 0.01337124 Iteration 5, loss = 0.00971036 Iteration 6, loss = 0.00770376 Iteration 7, loss = 0.00641207 Iteration 8, loss = 0.00554756 Iteration 9, loss = 0.00484261 Iteration 10, loss = 0.00436923 Iteration 11, loss = 0.00399416 Iteration 12, loss = 0.00362181 Iteration 13, loss = 0.00325189 Iteration 14, loss = 0.00311464 Iteration 15, loss = 0.00282311 Iteration 16, loss = 0.00262946 Iteration 17, loss = 0.00247540 Iteration 18, loss = 0.00232545 Iteration 19, loss = 0.00219754 Iteration 20, loss = 0.00208712 Iteration 21, loss = 0.00197100 Iteration 22, loss = 0.00190209 Iteration 23, loss = 0.00180734 Iteration 24, loss = 0.00173576 Iteration 25, loss = 0.00168314 Iteration 26, loss = 0.00159304 Iteration 27, loss = 0.00153197 Iteration 28, loss = 0.00146838 Iteration 29, loss = 0.00142733 Iteration 30, loss = 0.00138830 Iteration 31, loss = 0.00133620 Iteration 32, loss = 0.00127229 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping.
| class | precision | recall | f1-score | support |
|---|---|---|---|---|
| CV = 20 | | | | |
| Normal | 0.9993 ± 0.0004 | 0.9999 ± 0.0002 | 0.9996 ± 0.0003 | 1991.0000 ± 0.0000 |
| Fraud | 0.8333 ± 0.3206 | 0.5167 ± 0.2882 | 0.6133 ± 0.2811 | 3.0000 ± 0.0000 |
| accuracy | 0.9992 ± 0.0005 | 0.9992 ± 0.0005 | 0.9992 ± 0.0005 | 0.9992 ± 0.0005 |
| macro avg | 0.9163 ± 0.1604 | 0.7583 ± 0.1441 | 0.8065 ± 0.1407 | 1994.0000 ± 0.0000 |
| weighted avg | 0.9990 ± 0.0008 | 0.9992 ± 0.0005 | 0.9990 ± 0.0007 | 1994.0000 ± 0.0000 |
Test Set =========================================================================================== Iteration 1, loss = 0.24329350 Iteration 2, loss = 0.07769087 Iteration 3, loss = 0.03760005 Iteration 4, loss = 0.02319636 Iteration 5, loss = 0.01621913 Iteration 6, loss = 0.01206347 Iteration 7, loss = 0.00977473 Iteration 8, loss = 0.00793083 Iteration 9, loss = 0.00667472 Iteration 10, loss = 0.00574732 Iteration 11, loss = 0.00521044 Iteration 12, loss = 0.00440526 Iteration 13, loss = 0.00463492 Iteration 14, loss = 0.00374956 Iteration 15, loss = 0.00345166 Iteration 16, loss = 0.00302357 Iteration 17, loss = 0.00296131 Iteration 18, loss = 0.00283845 Iteration 19, loss = 0.00261734 Iteration 20, loss = 0.00236834 Iteration 21, loss = 0.00247095 Iteration 22, loss = 0.00207081 Iteration 23, loss = 0.00196146 Iteration 24, loss = 0.00205042 Iteration 25, loss = 0.00185515 Iteration 26, loss = 0.00176470 Iteration 27, loss = 0.00165307 Iteration 28, loss = 0.00168681 Iteration 29, loss = 0.00156017 Iteration 30, loss = 0.00157577 Iteration 31, loss = 0.00146409 Iteration 32, loss = 0.00148419 Iteration 33, loss = 0.00139583 Iteration 34, loss = 0.00128207 Iteration 35, loss = 0.00130831 Iteration 36, loss = 0.00118775 Iteration 37, loss = 0.00118266 Iteration 38, loss = 0.00121708 Iteration 39, loss = 0.00111114 Iteration 40, loss = 0.00105557 Iteration 41, loss = 0.00107882 Iteration 42, loss = 0.00102764 Iteration 43, loss = 0.00104313 Iteration 44, loss = 0.00094568 Iteration 45, loss = 0.00092750 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.66467298 Iteration 2, loss = 0.23800934 Iteration 3, loss = 0.11377712 Iteration 4, loss = 0.06457409 Iteration 5, loss = 0.04046454 Iteration 6, loss = 0.02848228 Iteration 7, loss = 0.02134464 Iteration 8, loss = 0.01684738 Iteration 9, loss = 0.01425462 Iteration 10, loss = 0.01151216 Iteration 11, loss = 0.01006239 Iteration 12, loss = 0.00842278 Iteration 13, loss = 0.00744423 Iteration 14, loss = 0.00686727 Iteration 15, loss = 0.00622940 Iteration 16, loss = 0.00573300 Iteration 17, loss = 0.00513124 Iteration 18, loss = 0.00486140 Iteration 19, loss = 0.00426098 Iteration 20, loss = 0.00412142 Iteration 21, loss = 0.00374061 Iteration 22, loss = 0.00355915 Iteration 23, loss = 0.00332891 Iteration 24, loss = 0.00316242 Iteration 25, loss = 0.00294587 Iteration 26, loss = 0.00284739 Iteration 27, loss = 0.00281890 Iteration 28, loss = 0.00265865 Iteration 29, loss = 0.00245826 Iteration 30, loss = 0.00237292 Iteration 31, loss = 0.00227025 Iteration 32, loss = 0.00219069 Iteration 33, loss = 0.00218650 Iteration 34, loss = 0.00209726 Iteration 35, loss = 0.00209897 Iteration 36, loss = 0.00194828 Iteration 37, loss = 0.00193492 Iteration 38, loss = 0.00188072 Iteration 39, loss = 0.00183590 Iteration 40, loss = 0.00176783 Iteration 41, loss = 0.00171137 Iteration 42, loss = 0.00165670 Iteration 43, loss = 0.00161793 Iteration 44, loss = 0.00158799 Iteration 45, loss = 0.00156542 Iteration 46, loss = 0.00148001 Iteration 47, loss = 0.00146789 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.65471610 Iteration 2, loss = 0.20694808 Iteration 3, loss = 0.09409644 Iteration 4, loss = 0.05491458 Iteration 5, loss = 0.03676842 Iteration 6, loss = 0.02685327 Iteration 7, loss = 0.02086110 Iteration 8, loss = 0.01662447 Iteration 9, loss = 0.01388449 Iteration 10, loss = 0.01167574 Iteration 11, loss = 0.01017428 Iteration 12, loss = 0.00901631 Iteration 13, loss = 0.00805340 Iteration 14, loss = 0.00718456 Iteration 15, loss = 0.00660357 Iteration 16, loss = 0.00609096 Iteration 17, loss = 0.00551222 Iteration 18, loss = 0.00511682 Iteration 19, loss = 0.00490918 Iteration 20, loss = 0.00461239 Iteration 21, loss = 0.00434649 Iteration 22, loss = 0.00407963 Iteration 23, loss = 0.00391962 Iteration 24, loss = 0.00375126 Iteration 25, loss = 0.00369908 Iteration 26, loss = 0.00394393 Iteration 27, loss = 0.00346177 Iteration 28, loss = 0.00307474 Iteration 29, loss = 0.00323432 Iteration 30, loss = 0.00287797 Iteration 31, loss = 0.00286986 Iteration 32, loss = 0.00262097 Iteration 33, loss = 0.00293155 Iteration 34, loss = 0.00261832 Iteration 35, loss = 0.00239447 Iteration 36, loss = 0.00229140 Iteration 37, loss = 0.00221558 Iteration 38, loss = 0.00214067 Iteration 39, loss = 0.00209706 Iteration 40, loss = 0.00208999 Iteration 41, loss = 0.00194692 Iteration 42, loss = 0.00191602 Iteration 43, loss = 0.00189184 Iteration 44, loss = 0.00187660 Iteration 45, loss = 0.00174531 Iteration 46, loss = 0.00173475 Iteration 47, loss = 0.00169591 Iteration 48, loss = 0.00165767 Iteration 49, loss = 0.00158506 Iteration 50, loss = 0.00153470 Iteration 51, loss = 0.00148728 Iteration 52, loss = 0.00145797 Iteration 53, loss = 0.00145864 Iteration 54, loss = 0.00153156 Iteration 55, loss = 0.00140252 Iteration 56, loss = 0.00131750 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.55930079 Iteration 2, loss = 0.19275316 Iteration 3, loss = 0.08759354 Iteration 4, loss = 0.04912113 Iteration 5, loss = 0.03183829 Iteration 6, loss = 0.02265360 Iteration 7, loss = 0.01717548 Iteration 8, loss = 0.01355327 Iteration 9, loss = 0.01107813 Iteration 10, loss = 0.00932485 Iteration 11, loss = 0.00799293 Iteration 12, loss = 0.00699639 Iteration 13, loss = 0.00616520 Iteration 14, loss = 0.00552926 Iteration 15, loss = 0.00502519 Iteration 16, loss = 0.00458527 Iteration 17, loss = 0.00425320 Iteration 18, loss = 0.00393468 Iteration 19, loss = 0.00367927 Iteration 20, loss = 0.00345393 Iteration 21, loss = 0.00324797 Iteration 22, loss = 0.00307519 Iteration 23, loss = 0.00292282 Iteration 24, loss = 0.00278117 Iteration 25, loss = 0.00265207 Iteration 26, loss = 0.00253933 Iteration 27, loss = 0.00243256 Iteration 28, loss = 0.00234094 Iteration 29, loss = 0.00224785 Iteration 30, loss = 0.00216616 Iteration 31, loss = 0.00208735 Iteration 32, loss = 0.00201063 Iteration 33, loss = 0.00194708 Iteration 34, loss = 0.00188058 Iteration 35, loss = 0.00181394 Iteration 36, loss = 0.00175680 Iteration 37, loss = 0.00170566 Iteration 38, loss = 0.00164734 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.37143917 Iteration 2, loss = 0.11354619 Iteration 3, loss = 0.05110381 Iteration 4, loss = 0.02996998 Iteration 5, loss = 0.02024920 Iteration 6, loss = 0.01505566 Iteration 7, loss = 0.01172732 Iteration 8, loss = 0.00938823 Iteration 9, loss = 0.00793195 Iteration 10, loss = 0.00683021 Iteration 11, loss = 0.00627376 Iteration 12, loss = 0.00522240 Iteration 13, loss = 0.00488529 Iteration 14, loss = 0.00410470 Iteration 15, loss = 0.00418632 Iteration 16, loss = 0.00369678 Iteration 17, loss = 0.00307020 Iteration 18, loss = 0.00295785 Iteration 19, loss = 0.00277400 Iteration 20, loss = 0.00259852 Iteration 21, loss = 0.00230888 Iteration 22, loss = 0.00230217 Iteration 23, loss = 0.00220461 Iteration 24, loss = 0.00193375 Iteration 25, loss = 0.00187364 Iteration 26, loss = 0.00176246 Iteration 27, loss = 0.00166082 Iteration 28, loss = 0.00159531 Iteration 29, loss = 0.00159048 Iteration 30, loss = 0.00165202 Iteration 31, loss = 0.00145300 Iteration 32, loss = 0.00134771 Iteration 33, loss = 0.00126105 Iteration 34, loss = 0.00119941 Iteration 35, loss = 0.00115760 Iteration 36, loss = 0.00115241 Iteration 37, loss = 0.00103578 Iteration 38, loss = 0.00100473 Iteration 39, loss = 0.00098152 Iteration 40, loss = 0.00098656 Iteration 41, loss = 0.00089735 Iteration 42, loss = 0.00093104 Iteration 43, loss = 0.00086811 Iteration 44, loss = 0.00085091 Iteration 45, loss = 0.00089097 Iteration 46, loss = 0.00078494 Iteration 47, loss = 0.00076035 Iteration 48, loss = 0.00074361 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.32924479 Iteration 2, loss = 0.10027403 Iteration 3, loss = 0.04825747 Iteration 4, loss = 0.02972666 Iteration 5, loss = 0.02067744 Iteration 6, loss = 0.01570538 Iteration 7, loss = 0.01217895 Iteration 8, loss = 0.00986255 Iteration 9, loss = 0.00845397 Iteration 10, loss = 0.00757170 Iteration 11, loss = 0.00658686 Iteration 12, loss = 0.00587203 Iteration 13, loss = 0.00521823 Iteration 14, loss = 0.00484749 Iteration 15, loss = 0.00450956 Iteration 16, loss = 0.00414791 Iteration 17, loss = 0.00382595 Iteration 18, loss = 0.00373950 Iteration 19, loss = 0.00363424 Iteration 20, loss = 0.00332075 Iteration 21, loss = 0.00312756 Iteration 22, loss = 0.00297826 Iteration 23, loss = 0.00281087 Iteration 24, loss = 0.00284578 Iteration 25, loss = 0.00288343 Iteration 26, loss = 0.00257156 Iteration 27, loss = 0.00238000 Iteration 28, loss = 0.00231150 Iteration 29, loss = 0.00223674 Iteration 30, loss = 0.00217178 Iteration 31, loss = 0.00206873 Iteration 32, loss = 0.00202993 Iteration 33, loss = 0.00192828 Iteration 34, loss = 0.00194382 Iteration 35, loss = 0.00180230 Iteration 36, loss = 0.00175147 Iteration 37, loss = 0.00171618 Iteration 38, loss = 0.00165323 Iteration 39, loss = 0.00160145 Iteration 40, loss = 0.00159829 Iteration 41, loss = 0.00151807 Iteration 42, loss = 0.00149217 Iteration 43, loss = 0.00141839 Iteration 44, loss = 0.00137982 Iteration 45, loss = 0.00134630 Iteration 46, loss = 0.00134044 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.24695607 Iteration 2, loss = 0.07402224 Iteration 3, loss = 0.03492697 Iteration 4, loss = 0.02175852 Iteration 5, loss = 0.01526931 Iteration 6, loss = 0.01187255 Iteration 7, loss = 0.00945178 Iteration 8, loss = 0.00809715 Iteration 9, loss = 0.00668609 Iteration 10, loss = 0.00582030 Iteration 11, loss = 0.00525642 Iteration 12, loss = 0.00475675 Iteration 13, loss = 0.00439259 Iteration 14, loss = 0.00387382 Iteration 15, loss = 0.00374750 Iteration 16, loss = 0.00340483 Iteration 17, loss = 0.00319864 Iteration 18, loss = 0.00296946 Iteration 19, loss = 0.00284146 Iteration 20, loss = 0.00256853 Iteration 21, loss = 0.00247069 Iteration 22, loss = 0.00234314 Iteration 23, loss = 0.00226402 Iteration 24, loss = 0.00216063 Iteration 25, loss = 0.00205061 Iteration 26, loss = 0.00192680 Iteration 27, loss = 0.00183795 Iteration 28, loss = 0.00187789 Iteration 29, loss = 0.00178526 Iteration 30, loss = 0.00160876 Iteration 31, loss = 0.00157939 Iteration 32, loss = 0.00149567 Iteration 33, loss = 0.00145763 Iteration 34, loss = 0.00141155 Iteration 35, loss = 0.00148128 Iteration 36, loss = 0.00158024 Iteration 37, loss = 0.00127579 Iteration 38, loss = 0.00122294 Iteration 39, loss = 0.00117320 Iteration 40, loss = 0.00117576 Iteration 41, loss = 0.00124603 Iteration 42, loss = 0.00142904 Iteration 43, loss = 0.00107118 Iteration 44, loss = 0.00101852 Iteration 45, loss = 0.00097253 Iteration 46, loss = 0.00094912 Iteration 47, loss = 0.00091295 Iteration 48, loss = 0.00088915 Iteration 49, loss = 0.00089587 Iteration 50, loss = 0.00084435 Iteration 51, loss = 0.00083526 Iteration 52, loss = 0.00079993 Iteration 53, loss = 0.00078765 Iteration 54, loss = 0.00077506 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.39580473 Iteration 2, loss = 0.12221844 Iteration 3, loss = 0.05757230 Iteration 4, loss = 0.03458829 Iteration 5, loss = 0.02379446 Iteration 6, loss = 0.01793942 Iteration 7, loss = 0.01378491 Iteration 8, loss = 0.01109918 Iteration 9, loss = 0.00935382 Iteration 10, loss = 0.00806478 Iteration 11, loss = 0.00710597 Iteration 12, loss = 0.00609116 Iteration 13, loss = 0.00552244 Iteration 14, loss = 0.00504001 Iteration 15, loss = 0.00457218 Iteration 16, loss = 0.00431994 Iteration 17, loss = 0.00409668 Iteration 18, loss = 0.00367263 Iteration 19, loss = 0.00344563 Iteration 20, loss = 0.00329224 Iteration 21, loss = 0.00317946 Iteration 22, loss = 0.00289067 Iteration 23, loss = 0.00277723 Iteration 24, loss = 0.00264739 Iteration 25, loss = 0.00250835 Iteration 26, loss = 0.00241014 Iteration 27, loss = 0.00231816 Iteration 28, loss = 0.00225493 Iteration 29, loss = 0.00225344 Iteration 30, loss = 0.00222221 Iteration 31, loss = 0.00203158 Iteration 32, loss = 0.00198577 Iteration 33, loss = 0.00185422 Iteration 34, loss = 0.00180006 Iteration 35, loss = 0.00176053 Iteration 36, loss = 0.00175270 Iteration 37, loss = 0.00163675 Iteration 38, loss = 0.00164595 Iteration 39, loss = 0.00153804 Iteration 40, loss = 0.00149604 Iteration 41, loss = 0.00144774 Iteration 42, loss = 0.00145081 Iteration 43, loss = 0.00148038 Iteration 44, loss = 0.00136660 Iteration 45, loss = 0.00132129 Iteration 46, loss = 0.00126742 Iteration 47, loss = 0.00124887 Iteration 48, loss = 0.00120267 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.29087435 Iteration 2, loss = 0.09507314 Iteration 3, loss = 0.04644131 Iteration 4, loss = 0.02862173 Iteration 5, loss = 0.01993382 Iteration 6, loss = 0.01516891 Iteration 7, loss = 0.01184282 Iteration 8, loss = 0.00997011 Iteration 9, loss = 0.00821427 Iteration 10, loss = 0.00721774 Iteration 11, loss = 0.00634240 Iteration 12, loss = 0.00580106 Iteration 13, loss = 0.00530730 Iteration 14, loss = 0.00495430 Iteration 15, loss = 0.00461570 Iteration 16, loss = 0.00413834 Iteration 17, loss = 0.00381900 Iteration 18, loss = 0.00373748 Iteration 19, loss = 0.00336528 Iteration 20, loss = 0.00327899 Iteration 21, loss = 0.00333467 Iteration 22, loss = 0.00290886 Iteration 23, loss = 0.00313418 Iteration 24, loss = 0.00281496 Iteration 25, loss = 0.00254115 Iteration 26, loss = 0.00247606 Iteration 27, loss = 0.00231425 Iteration 28, loss = 0.00243735 Iteration 29, loss = 0.00227601 Iteration 30, loss = 0.00204523 Iteration 31, loss = 0.00213276 Iteration 32, loss = 0.00188604 Iteration 33, loss = 0.00183932 Iteration 34, loss = 0.00180062 Iteration 35, loss = 0.00181769 Iteration 36, loss = 0.00161955 Iteration 37, loss = 0.00164845 Iteration 38, loss = 0.00154021 Iteration 39, loss = 0.00146064 Iteration 40, loss = 0.00146501 Iteration 41, loss = 0.00152729 Iteration 42, loss = 0.00131970 Iteration 43, loss = 0.00128741 Iteration 44, loss = 0.00122567 Iteration 45, loss = 0.00123659 Iteration 46, loss = 0.00131854 Iteration 47, loss = 0.00142927 Iteration 48, loss = 0.00111923 Iteration 49, loss = 0.00107913 Iteration 50, loss = 0.00103288 Iteration 51, loss = 0.00103649 Iteration 52, loss = 0.00101883 Iteration 53, loss = 0.00096397 Iteration 54, loss = 0.00097389 Iteration 55, loss = 0.00097270 Iteration 56, loss = 0.00089544 Iteration 57, loss = 0.00094546 Iteration 58, loss = 0.00085035 Iteration 59, loss = 0.00084733 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.61524768 Iteration 2, loss = 0.21070558 Iteration 3, loss = 0.09798027 Iteration 4, loss = 0.05597422 Iteration 5, loss = 0.03658723 Iteration 6, loss = 0.02645535 Iteration 7, loss = 0.01989459 Iteration 8, loss = 0.01585982 Iteration 9, loss = 0.01345510 Iteration 10, loss = 0.01148974 Iteration 11, loss = 0.00994574 Iteration 12, loss = 0.00856623 Iteration 13, loss = 0.00766994 Iteration 14, loss = 0.00720453 Iteration 15, loss = 0.00636266 Iteration 16, loss = 0.00584951 Iteration 17, loss = 0.00531695 Iteration 18, loss = 0.00495954 Iteration 19, loss = 0.00463689 Iteration 20, loss = 0.00435733 Iteration 21, loss = 0.00410732 Iteration 22, loss = 0.00395066 Iteration 23, loss = 0.00374051 Iteration 24, loss = 0.00358701 Iteration 25, loss = 0.00332498 Iteration 26, loss = 0.00319617 Iteration 27, loss = 0.00313775 Iteration 28, loss = 0.00299078 Iteration 29, loss = 0.00290128 Iteration 30, loss = 0.00266989 Iteration 31, loss = 0.00260870 Iteration 32, loss = 0.00258053 Iteration 33, loss = 0.00245343 Iteration 34, loss = 0.00237858 Iteration 35, loss = 0.00228019 Iteration 36, loss = 0.00217660 Iteration 37, loss = 0.00214719 Iteration 38, loss = 0.00208568 Iteration 39, loss = 0.00219418 Iteration 40, loss = 0.00196055 Iteration 41, loss = 0.00188493 Iteration 42, loss = 0.00187186 Iteration 43, loss = 0.00180652 Iteration 44, loss = 0.00171574 Iteration 45, loss = 0.00170966 Iteration 46, loss = 0.00171342 Iteration 47, loss = 0.00167268 Iteration 48, loss = 0.00155012 Iteration 49, loss = 0.00150122 Iteration 50, loss = 0.00150451 Iteration 51, loss = 0.00142172 Iteration 52, loss = 0.00140193 Iteration 53, loss = 0.00143606 Iteration 54, loss = 0.00135514 Iteration 55, loss = 0.00128307 Iteration 56, loss = 0.00129536 Iteration 57, loss = 0.00126215 Iteration 58, loss = 0.00128168 Iteration 59, loss = 0.00121439 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.53099495 Iteration 2, loss = 0.16834362 Iteration 3, loss = 0.07707742 Iteration 4, loss = 0.04487203 Iteration 5, loss = 0.02985264 Iteration 6, loss = 0.02163596 Iteration 7, loss = 0.01644124 Iteration 8, loss = 0.01346382 Iteration 9, loss = 0.01074006 Iteration 10, loss = 0.00911208 Iteration 11, loss = 0.00759783 Iteration 12, loss = 0.00654782 Iteration 13, loss = 0.00581438 Iteration 14, loss = 0.00509116 Iteration 15, loss = 0.00444049 Iteration 16, loss = 0.00416677 Iteration 17, loss = 0.00381525 Iteration 18, loss = 0.00354818 Iteration 19, loss = 0.00314906 Iteration 20, loss = 0.00284819 Iteration 21, loss = 0.00262662 Iteration 22, loss = 0.00256974 Iteration 23, loss = 0.00212971 Iteration 24, loss = 0.00220955 Iteration 25, loss = 0.00196568 Iteration 26, loss = 0.00183184 Iteration 27, loss = 0.00182532 Iteration 28, loss = 0.00168778 Iteration 29, loss = 0.00159808 Iteration 30, loss = 0.00147955 Iteration 31, loss = 0.00150443 Iteration 32, loss = 0.00175148 Iteration 33, loss = 0.00138038 Iteration 34, loss = 0.00128428 Iteration 35, loss = 0.00129726 Iteration 36, loss = 0.00117653 Iteration 37, loss = 0.00126921 Iteration 38, loss = 0.00122568 Iteration 39, loss = 0.00100942 Iteration 40, loss = 0.00101820 Iteration 41, loss = 0.00093545 Iteration 42, loss = 0.00091329 Iteration 43, loss = 0.00090897 Iteration 44, loss = 0.00086998 Iteration 45, loss = 0.00084126 Iteration 46, loss = 0.00084673 Iteration 47, loss = 0.00079594 Iteration 48, loss = 0.00081714 Iteration 49, loss = 0.00085966 Iteration 50, loss = 0.00097716 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.37967585 Iteration 2, loss = 0.11899063 Iteration 3, loss = 0.05587287 Iteration 4, loss = 0.03348856 Iteration 5, loss = 0.02341256 Iteration 6, loss = 0.01739331 Iteration 7, loss = 0.01376755 Iteration 8, loss = 0.01134166 Iteration 9, loss = 0.00992975 Iteration 10, loss = 0.00854870 Iteration 11, loss = 0.00769436 Iteration 12, loss = 0.00664884 Iteration 13, loss = 0.00596861 Iteration 14, loss = 0.00539915 Iteration 15, loss = 0.00512861 Iteration 16, loss = 0.00473986 Iteration 17, loss = 0.00454538 Iteration 18, loss = 0.00409454 Iteration 19, loss = 0.00397325 Iteration 20, loss = 0.00378542 Iteration 21, loss = 0.00341513 Iteration 22, loss = 0.00334408 Iteration 23, loss = 0.00306420 Iteration 24, loss = 0.00282787 Iteration 25, loss = 0.00291739 Iteration 26, loss = 0.00267871 Iteration 27, loss = 0.00288406 Iteration 28, loss = 0.00259212 Iteration 29, loss = 0.00238620 Iteration 30, loss = 0.00225283 Iteration 31, loss = 0.00222106 Iteration 32, loss = 0.00204585 Iteration 33, loss = 0.00200741 Iteration 34, loss = 0.00189925 Iteration 35, loss = 0.00184412 Iteration 36, loss = 0.00187487 Iteration 37, loss = 0.00173929 Iteration 38, loss = 0.00175915 Iteration 39, loss = 0.00166170 Iteration 40, loss = 0.00161141 Iteration 41, loss = 0.00150002 Iteration 42, loss = 0.00145140 Iteration 43, loss = 0.00142174 Iteration 44, loss = 0.00137832 Iteration 45, loss = 0.00132625 Iteration 46, loss = 0.00128213 Iteration 47, loss = 0.00125148 Iteration 48, loss = 0.00123338 Iteration 49, loss = 0.00118743 Iteration 50, loss = 0.00113020 Iteration 51, loss = 0.00112355 Iteration 52, loss = 0.00114335 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.40939202 Iteration 2, loss = 0.12331034 Iteration 3, loss = 0.05883929 Iteration 4, loss = 0.03565818 Iteration 5, loss = 0.02454784 Iteration 6, loss = 0.01814149 Iteration 7, loss = 0.01396706 Iteration 8, loss = 0.01125701 Iteration 9, loss = 0.00938046 Iteration 10, loss = 0.00794951 Iteration 11, loss = 0.00694482 Iteration 12, loss = 0.00615874 Iteration 13, loss = 0.00552918 Iteration 14, loss = 0.00491734 Iteration 15, loss = 0.00451767 Iteration 16, loss = 0.00416120 Iteration 17, loss = 0.00383665 Iteration 18, loss = 0.00356606 Iteration 19, loss = 0.00335691 Iteration 20, loss = 0.00312591 Iteration 21, loss = 0.00297312 Iteration 22, loss = 0.00278652 Iteration 23, loss = 0.00265737 Iteration 24, loss = 0.00252426 Iteration 25, loss = 0.00241456 Iteration 26, loss = 0.00230822 Iteration 27, loss = 0.00222251 Iteration 28, loss = 0.00211977 Iteration 29, loss = 0.00203588 Iteration 30, loss = 0.00195713 Iteration 31, loss = 0.00189782 Iteration 32, loss = 0.00181973 Iteration 33, loss = 0.00174848 Iteration 34, loss = 0.00168901 Iteration 35, loss = 0.00164055 Iteration 36, loss = 0.00158159 Iteration 37, loss = 0.00153606 Iteration 38, loss = 0.00148169 Iteration 39, loss = 0.00143783 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.23029891 Iteration 2, loss = 0.06855063 Iteration 3, loss = 0.03342798 Iteration 4, loss = 0.02111881 Iteration 5, loss = 0.01450045 Iteration 6, loss = 0.01097309 Iteration 7, loss = 0.00895842 Iteration 8, loss = 0.00739554 Iteration 9, loss = 0.00646007 Iteration 10, loss = 0.00603067 Iteration 11, loss = 0.00493242 Iteration 12, loss = 0.00438719 Iteration 13, loss = 0.00443028 Iteration 14, loss = 0.00402486 Iteration 15, loss = 0.00352942 Iteration 16, loss = 0.00323963 Iteration 17, loss = 0.00306054 Iteration 18, loss = 0.00281214 Iteration 19, loss = 0.00265188 Iteration 20, loss = 0.00267405 Iteration 21, loss = 0.00242669 Iteration 22, loss = 0.00237577 Iteration 23, loss = 0.00226336 Iteration 24, loss = 0.00211848 Iteration 25, loss = 0.00199332 Iteration 26, loss = 0.00191613 Iteration 27, loss = 0.00179303 Iteration 28, loss = 0.00174197 Iteration 29, loss = 0.00168644 Iteration 30, loss = 0.00167513 Iteration 31, loss = 0.00152890 Iteration 32, loss = 0.00150593 Iteration 33, loss = 0.00147807 Iteration 34, loss = 0.00147188 Iteration 35, loss = 0.00132322 Iteration 36, loss = 0.00131263 Iteration 37, loss = 0.00123896 Iteration 38, loss = 0.00124147 Iteration 39, loss = 0.00118425 Iteration 40, loss = 0.00112786 Iteration 41, loss = 0.00111344 Iteration 42, loss = 0.00109627 Iteration 43, loss = 0.00108500 Iteration 44, loss = 0.00098892 Iteration 45, loss = 0.00102053 Iteration 46, loss = 0.00094114 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.42553645 Iteration 2, loss = 0.13681435 Iteration 3, loss = 0.06454372 Iteration 4, loss = 0.03806576 Iteration 5, loss = 0.02566133 Iteration 6, loss = 0.01887258 Iteration 7, loss = 0.01472968 Iteration 8, loss = 0.01180981 Iteration 9, loss = 0.01006978 Iteration 10, loss = 0.00836706 Iteration 11, loss = 0.00735475 Iteration 12, loss = 0.00669962 Iteration 13, loss = 0.00578602 Iteration 14, loss = 0.00528695 Iteration 15, loss = 0.00511676 Iteration 16, loss = 0.00445553 Iteration 17, loss = 0.00417830 Iteration 18, loss = 0.00383751 Iteration 19, loss = 0.00366418 Iteration 20, loss = 0.00340188 Iteration 21, loss = 0.00335812 Iteration 22, loss = 0.00307669 Iteration 23, loss = 0.00297604 Iteration 24, loss = 0.00295194 Iteration 25, loss = 0.00270392 Iteration 26, loss = 0.00254937 Iteration 27, loss = 0.00244438 Iteration 28, loss = 0.00248636 Iteration 29, loss = 0.00245201 Iteration 30, loss = 0.00222895 Iteration 31, loss = 0.00221893 Iteration 32, loss = 0.00210624 Iteration 33, loss = 0.00196064 Iteration 34, loss = 0.00191047 Iteration 35, loss = 0.00185342 Iteration 36, loss = 0.00180288 Iteration 37, loss = 0.00180908 Iteration 38, loss = 0.00175167 Iteration 39, loss = 0.00190290 Iteration 40, loss = 0.00177140 Iteration 41, loss = 0.00194327 Iteration 42, loss = 0.00156667 Iteration 43, loss = 0.00142927 Iteration 44, loss = 0.00145853 Iteration 45, loss = 0.00135653 Iteration 46, loss = 0.00132539 Iteration 47, loss = 0.00129060 Iteration 48, loss = 0.00125566 Iteration 49, loss = 0.00121063 Iteration 50, loss = 0.00118722 Iteration 51, loss = 0.00116552 Iteration 52, loss = 0.00117042 Iteration 53, loss = 0.00118707 Iteration 54, loss = 0.00124804 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.53932917 Iteration 2, loss = 0.16826758 Iteration 3, loss = 0.08098238 Iteration 4, loss = 0.04869194 Iteration 5, loss = 0.03338692 Iteration 6, loss = 0.02489643 Iteration 7, loss = 0.01934216 Iteration 8, loss = 0.01573860 Iteration 9, loss = 0.01310461 Iteration 10, loss = 0.01139325 Iteration 11, loss = 0.00977495 Iteration 12, loss = 0.00853249 Iteration 13, loss = 0.00751224 Iteration 14, loss = 0.00698677 Iteration 15, loss = 0.00618542 Iteration 16, loss = 0.00597622 Iteration 17, loss = 0.00511614 Iteration 18, loss = 0.00468071 Iteration 19, loss = 0.00435475 Iteration 20, loss = 0.00410003 Iteration 21, loss = 0.00390341 Iteration 22, loss = 0.00354473 Iteration 23, loss = 0.00336072 Iteration 24, loss = 0.00319534 Iteration 25, loss = 0.00304222 Iteration 26, loss = 0.00298031 Iteration 27, loss = 0.00279928 Iteration 28, loss = 0.00266969 Iteration 29, loss = 0.00260955 Iteration 30, loss = 0.00245568 Iteration 31, loss = 0.00235874 Iteration 32, loss = 0.00225844 Iteration 33, loss = 0.00221987 Iteration 34, loss = 0.00209950 Iteration 35, loss = 0.00204226 Iteration 36, loss = 0.00198329 Iteration 37, loss = 0.00195378 Iteration 38, loss = 0.00185908 Iteration 39, loss = 0.00179795 Iteration 40, loss = 0.00176425 Iteration 41, loss = 0.00171951 Iteration 42, loss = 0.00168425 Iteration 43, loss = 0.00164080 Iteration 44, loss = 0.00159745 Iteration 45, loss = 0.00158656 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.49628182 Iteration 2, loss = 0.16156376 Iteration 3, loss = 0.07701924 Iteration 4, loss = 0.04591222 Iteration 5, loss = 0.03090675 Iteration 6, loss = 0.02280796 Iteration 7, loss = 0.01760654 Iteration 8, loss = 0.01448797 Iteration 9, loss = 0.01200884 Iteration 10, loss = 0.01018731 Iteration 11, loss = 0.00872459 Iteration 12, loss = 0.00778256 Iteration 13, loss = 0.00686538 Iteration 14, loss = 0.00624853 Iteration 15, loss = 0.00578042 Iteration 16, loss = 0.00544442 Iteration 17, loss = 0.00485022 Iteration 18, loss = 0.00465270 Iteration 19, loss = 0.00433427 Iteration 20, loss = 0.00411399 Iteration 21, loss = 0.00392794 Iteration 22, loss = 0.00355885 Iteration 23, loss = 0.00347893 Iteration 24, loss = 0.00342089 Iteration 25, loss = 0.00306147 Iteration 26, loss = 0.00301672 Iteration 27, loss = 0.00283762 Iteration 28, loss = 0.00272639 Iteration 29, loss = 0.00258067 Iteration 30, loss = 0.00256170 Iteration 31, loss = 0.00238297 Iteration 32, loss = 0.00228627 Iteration 33, loss = 0.00223422 Iteration 34, loss = 0.00217257 Iteration 35, loss = 0.00210892 Iteration 36, loss = 0.00197992 Iteration 37, loss = 0.00189126 Iteration 38, loss = 0.00198525 Iteration 39, loss = 0.00193994 Iteration 40, loss = 0.00179163 Iteration 41, loss = 0.00171811 Iteration 42, loss = 0.00162635 Iteration 43, loss = 0.00159355 Iteration 44, loss = 0.00155769 Iteration 45, loss = 0.00163678 Iteration 46, loss = 0.00145906 Iteration 47, loss = 0.00141100 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.40448948 Iteration 2, loss = 0.12090072 Iteration 3, loss = 0.05634068 Iteration 4, loss = 0.03353140 Iteration 5, loss = 0.02311858 Iteration 6, loss = 0.01716675 Iteration 7, loss = 0.01339943 Iteration 8, loss = 0.01087742 Iteration 9, loss = 0.00898665 Iteration 10, loss = 0.00762023 Iteration 11, loss = 0.00658180 Iteration 12, loss = 0.00590870 Iteration 13, loss = 0.00521846 Iteration 14, loss = 0.00482307 Iteration 15, loss = 0.00449176 Iteration 16, loss = 0.00395750 Iteration 17, loss = 0.00360257 Iteration 18, loss = 0.00340149 Iteration 19, loss = 0.00313288 Iteration 20, loss = 0.00291328 Iteration 21, loss = 0.00276232 Iteration 22, loss = 0.00269986 Iteration 23, loss = 0.00271377 Iteration 24, loss = 0.00245138 Iteration 25, loss = 0.00235680 Iteration 26, loss = 0.00227161 Iteration 27, loss = 0.00219718 Iteration 28, loss = 0.00220447 Iteration 29, loss = 0.00196445 Iteration 30, loss = 0.00191597 Iteration 31, loss = 0.00178252 Iteration 32, loss = 0.00181062 Iteration 33, loss = 0.00166587 Iteration 34, loss = 0.00162905 Iteration 35, loss = 0.00157643 Iteration 36, loss = 0.00152867 Iteration 37, loss = 0.00146213 Iteration 38, loss = 0.00141041 Iteration 39, loss = 0.00138803 Iteration 40, loss = 0.00140321 Iteration 41, loss = 0.00134414 Iteration 42, loss = 0.00140092 Iteration 43, loss = 0.00126552 Iteration 44, loss = 0.00123025 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.66838332 Iteration 2, loss = 0.22571017 Iteration 3, loss = 0.10586193 Iteration 4, loss = 0.06125339 Iteration 5, loss = 0.04028353 Iteration 6, loss = 0.02882490 Iteration 7, loss = 0.02167823 Iteration 8, loss = 0.01721267 Iteration 9, loss = 0.01407135 Iteration 10, loss = 0.01182015 Iteration 11, loss = 0.01008687 Iteration 12, loss = 0.00870835 Iteration 13, loss = 0.00771248 Iteration 14, loss = 0.00679108 Iteration 15, loss = 0.00611009 Iteration 16, loss = 0.00557664 Iteration 17, loss = 0.00506134 Iteration 18, loss = 0.00474014 Iteration 19, loss = 0.00447654 Iteration 20, loss = 0.00407852 Iteration 21, loss = 0.00374629 Iteration 22, loss = 0.00357844 Iteration 23, loss = 0.00335767 Iteration 24, loss = 0.00321693 Iteration 25, loss = 0.00297353 Iteration 26, loss = 0.00285144 Iteration 27, loss = 0.00277079 Iteration 28, loss = 0.00260786 Iteration 29, loss = 0.00262931 Iteration 30, loss = 0.00241896 Iteration 31, loss = 0.00237483 Iteration 32, loss = 0.00223492 Iteration 33, loss = 0.00213967 Iteration 34, loss = 0.00211972 Iteration 35, loss = 0.00199517 Iteration 36, loss = 0.00194735 Iteration 37, loss = 0.00194069 Iteration 38, loss = 0.00196096 Iteration 39, loss = 0.00181776 Iteration 40, loss = 0.00180635 Iteration 41, loss = 0.00166417 Iteration 42, loss = 0.00165365 Iteration 43, loss = 0.00160726 Iteration 44, loss = 0.00157579 Iteration 45, loss = 0.00153573 Iteration 46, loss = 0.00147251 Iteration 47, loss = 0.00144347 Iteration 48, loss = 0.00145807 Iteration 49, loss = 0.00140479 Iteration 50, loss = 0.00138566 Iteration 51, loss = 0.00131388 Iteration 52, loss = 0.00129925 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping. 
Iteration 1, loss = 0.45319033 Iteration 2, loss = 0.14174527 Iteration 3, loss = 0.06525396 Iteration 4, loss = 0.03726888 Iteration 5, loss = 0.02477762 Iteration 6, loss = 0.01794069 Iteration 7, loss = 0.01360795 Iteration 8, loss = 0.01100233 Iteration 9, loss = 0.00895235 Iteration 10, loss = 0.00751053 Iteration 11, loss = 0.00647409 Iteration 12, loss = 0.00569825 Iteration 13, loss = 0.00507688 Iteration 14, loss = 0.00457125 Iteration 15, loss = 0.00418234 Iteration 16, loss = 0.00383871 Iteration 17, loss = 0.00355120 Iteration 18, loss = 0.00331552 Iteration 19, loss = 0.00309670 Iteration 20, loss = 0.00292496 Iteration 21, loss = 0.00276841 Iteration 22, loss = 0.00261738 Iteration 23, loss = 0.00249186 Iteration 24, loss = 0.00238180 Iteration 25, loss = 0.00226874 Iteration 26, loss = 0.00217935 Iteration 27, loss = 0.00208579 Iteration 28, loss = 0.00201197 Iteration 29, loss = 0.00193708 Iteration 30, loss = 0.00186893 Iteration 31, loss = 0.00180272 Iteration 32, loss = 0.00174520 Iteration 33, loss = 0.00169376 Iteration 34, loss = 0.00164261 Iteration 35, loss = 0.00158940 Iteration 36, loss = 0.00154742 Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping.
| | precision | recall | f1-score | support |
|---|---|---|---|---|
| CV = 20 | | | | |
| Normal | 0.9990 ± 0.0008 | 0.9999 ± 0.0003 | 0.9995 ± 0.0004 | 853.0000 ± 0.0000 |
| Fraud | 0.8250 ± 0.3631 | 0.5750 ± 0.3269 | 0.6583 ± 0.3183 | 2.0000 ± 0.0000 |
| accuracy | 0.9989 ± 0.0008 | 0.9989 ± 0.0008 | 0.9989 ± 0.0008 | 0.9989 ± 0.0008 |
| macro avg | 0.9120 ± 0.1819 | 0.7875 ± 0.1635 | 0.8289 ± 0.1593 | 855.0000 ± 0.0000 |
| weighted avg | 0.9986 ± 0.0015 | 0.9989 ± 0.0008 | 0.9987 ± 0.0011 | 855.0000 ± 0.0000 |
====================================================================================================
In the next article, we try to improve these results using a PyTorch MLP.